Diffstat (limited to 'linux-core')
-rw-r--r--	linux-core/ati_pcigart.c |  69
-rw-r--r--	linux-core/drmP.h        |   3
-rw-r--r--	linux-core/drm_compat.c  | 107
-rw-r--r--	linux-core/drm_compat.h  |  23
-rw-r--r--	linux-core/drm_fence.c   |  75
-rw-r--r--	linux-core/drm_vm.c      |  81
-rw-r--r--	linux-core/i915_fence.c  |  18
7 files changed, 223 insertions(+), 153 deletions(-)
diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c
index bbdb841c..bb30dd74 100644
--- a/linux-core/ati_pcigart.c
+++ b/linux-core/ati_pcigart.c
@@ -33,41 +33,25 @@
#include "drmP.h"
-#if PAGE_SIZE == 65536
-# define ATI_PCIGART_TABLE_ORDER 0
-# define ATI_PCIGART_TABLE_PAGES (1 << 0)
-#elif PAGE_SIZE == 16384
-# define ATI_PCIGART_TABLE_ORDER 1
-# define ATI_PCIGART_TABLE_PAGES (1 << 1)
-#elif PAGE_SIZE == 8192
-# define ATI_PCIGART_TABLE_ORDER 2
-# define ATI_PCIGART_TABLE_PAGES (1 << 2)
-#elif PAGE_SIZE == 4096
-# define ATI_PCIGART_TABLE_ORDER 3
-# define ATI_PCIGART_TABLE_PAGES (1 << 3)
-#else
-# error - PAGE_SIZE not 64K, 16K, 8K or 4K
-#endif
-
-# define ATI_MAX_PCIGART_PAGES 8192 /**< 32 MB aperture, 4K pages */
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
-static void *drm_ati_alloc_pcigart_table(void)
+static void *drm_ati_alloc_pcigart_table(int order)
{
unsigned long address;
struct page *page;
	int i;
+	int num_pages = 1 << order;
- DRM_DEBUG("%s\n", __FUNCTION__);
+
+	DRM_DEBUG("%s: alloc order %d\n", __FUNCTION__, order);
address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
- ATI_PCIGART_TABLE_ORDER);
+ order);
if (address == 0UL) {
return NULL;
}
page = virt_to_page(address);
- for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
+	for (i = 0; i < num_pages; i++, page++) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
get_page(page);
#endif
@@ -78,22 +62,23 @@ static void *drm_ati_alloc_pcigart_table(void)
return (void *)address;
}
-static void drm_ati_free_pcigart_table(void *address)
+static void drm_ati_free_pcigart_table(void *address, int order)
{
struct page *page;
int i;
+ int num_pages = 1 << order;
DRM_DEBUG("%s\n", __FUNCTION__);
page = virt_to_page((unsigned long)address);
- for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
+ for (i = 0; i < num_pages; i++, page++) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
__put_page(page);
#endif
ClearPageReserved(page);
}
- free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER);
+ free_pages((unsigned long)address, order);
}
int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
@@ -101,6 +86,8 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
drm_sg_mem_t *entry = dev->sg;
unsigned long pages;
int i;
+ int order;
+ int num_pages, max_pages;
/* we need to support large memory configurations */
if (!entry) {
@@ -108,15 +95,19 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
return 0;
}
+ order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
+ num_pages = 1 << order;
+
if (gart_info->bus_addr) {
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
pci_unmap_single(dev->pdev, gart_info->bus_addr,
- ATI_PCIGART_TABLE_PAGES * PAGE_SIZE,
+ num_pages * PAGE_SIZE,
PCI_DMA_TODEVICE);
}
- pages = (entry->pages <= ATI_MAX_PCIGART_PAGES)
- ? entry->pages : ATI_MAX_PCIGART_PAGES;
+ max_pages = (gart_info->table_size / sizeof(u32));
+ pages = (entry->pages <= max_pages)
+ ? entry->pages : max_pages;
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
@@ -132,7 +123,8 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
&& gart_info->addr) {
- drm_ati_free_pcigart_table(gart_info->addr);
+		drm_ati_free_pcigart_table(gart_info->addr, order);
gart_info->addr = NULL;
}
@@ -147,6 +139,9 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
unsigned long pages;
u32 *pci_gart, page_base, bus_address = 0;
int i, j, ret = 0;
+ int order;
+ int max_pages;
+ int num_pages;
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
@@ -156,7 +151,9 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
- address = drm_ati_alloc_pcigart_table();
+ order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
+ num_pages = 1 << order;
+ address = drm_ati_alloc_pcigart_table(order);
if (!address) {
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
@@ -168,11 +165,12 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
}
bus_address = pci_map_single(dev->pdev, address,
- ATI_PCIGART_TABLE_PAGES *
- PAGE_SIZE, PCI_DMA_TODEVICE);
+ num_pages * PAGE_SIZE,
+ PCI_DMA_TODEVICE);
if (bus_address == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
- drm_ati_free_pcigart_table(address);
+		drm_ati_free_pcigart_table(address, order);
address = NULL;
goto done;
}
@@ -185,10 +183,11 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
pci_gart = (u32 *) address;
- pages = (entry->pages <= ATI_MAX_PCIGART_PAGES)
- ? entry->pages : ATI_MAX_PCIGART_PAGES;
+ max_pages = (gart_info->table_size / sizeof(u32));
+ pages = (entry->pages <= max_pages)
+ ? entry->pages : max_pages;
- memset(pci_gart, 0, ATI_MAX_PCIGART_PAGES * sizeof(u32));
+ memset(pci_gart, 0, max_pages * sizeof(u32));
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
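Note on the sizing scheme above: instead of a compile-time ATI_PCIGART_TABLE_ORDER, the allocation order is now derived at runtime from gart_info->table_size. A minimal userspace sketch of the equivalent computation, assuming drm_order() returns the smallest order whose power of two covers the requested page count (like the kernel's get_order()); the helper name and the 32 KB table size below are illustrative only:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stand-in for DRM's drm_order(): smallest order with (1 << order) >= pages. */
static int sketch_order(unsigned long pages)
{
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long table_size = 32768UL;	/* hypothetical 32 KB GART table */
	unsigned long pages = (table_size + PAGE_SIZE - 1) / PAGE_SIZE;
	int order = sketch_order(pages);

	/* 8 pages -> order 3: __get_free_pages() hands back 1 << 3 pages,
	 * and max_pages = table_size / sizeof(u32) bounds the entry count. */
	printf("pages=%lu order=%d num_pages=%lu\n", pages, order, 1UL << order);
	return 0;
}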
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 9b5f5bdd..d1dbdc2d 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -593,6 +593,7 @@ typedef struct ati_pcigart_info {
void *addr;
dma_addr_t bus_addr;
drm_local_map_t mapping;
+ int table_size;
} drm_ati_pcigart_info;
@@ -847,7 +848,7 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
}
#ifdef __alpha__
-#define drm_get_pci_domain(dev) dev->hose->bus->number
+#define drm_get_pci_domain(dev) dev->hose->index
#else
#define drm_get_pci_domain(dev) 0
#endif
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 4825f0c0..23441811 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -94,6 +94,11 @@ static struct {
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+ struct fault_data *data);
+
struct page * get_nopage_retry(void)
{
if (atomic_read(&drm_np_retry.present) == 0) {
@@ -180,7 +185,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma,
return ret;
}
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
int ret;
@@ -190,14 +195,106 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
return ret;
}
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+ struct fault_data *data)
+{
+ unsigned long address = data->address;
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ unsigned long page_offset;
+ struct page *page = NULL;
+ drm_ttm_t *ttm;
+ drm_device_t *dev;
+ unsigned long pfn;
+ int err;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+
+ mutex_lock(&bo->mutex);
+
+ err = drm_bo_wait(bo, 0, 1, 0);
+ if (err) {
+ data->type = (err == -EAGAIN) ?
+ VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ /*
+ * If buffer happens to be in a non-mappable location,
+ * move it to a mappable.
+ */
+
+ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+ unsigned long _end = jiffies + 3*DRM_HZ;
+ uint32_t new_mask = bo->mem.mask |
+ DRM_BO_FLAG_MAPPABLE |
+ DRM_BO_FLAG_FORCE_MAPPABLE;
+
+ do {
+ err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+ } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
+
+ if (err) {
+ DRM_ERROR("Timeout moving buffer to mappable location.\n");
+ data->type = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ }
+
+ if (address > vma->vm_end) {
+ data->type = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ dev = bo->dev;
+ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+ &bus_size);
+
+ if (err) {
+ data->type = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+ if (bus_size) {
+ drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
+
+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+ } else {
+ ttm = bo->ttm;
+
+ drm_ttm_fixup_caching(ttm);
+ page = drm_ttm_get_page(ttm, page_offset);
+ if (!page) {
+ data->type = VM_FAULT_OOM;
+ goto out_unlock;
+ }
+ pfn = page_to_pfn(page);
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ err = vm_insert_pfn(vma, address, pfn);
+
+ if (!err || err == -EBUSY)
+ data->type = VM_FAULT_MINOR;
+ else
+ data->type = VM_FAULT_OOM;
+out_unlock:
+ mutex_unlock(&bo->mutex);
+ return NULL;
+}
+
#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
+ !defined(DRM_FULL_MM_COMPAT)
/**
- * While waiting for the fault() handler to appear in
- * we accomplish approximately
- * the same wrapping it with nopfn.
*/
unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 9692492d..bf5899fb 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -152,6 +152,13 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
(tmp);})
#endif
+#ifndef list_for_each_entry_safe_reverse
+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member), \
+ n = list_entry(pos->member.prev, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+#endif
#include <linux/mm.h>
#include <asm/page.h>
@@ -205,19 +212,10 @@ extern void free_nopage_retry(void);
#define NOPAGE_REFAULT get_nopage_retry()
#endif
-#if !defined(DRM_FULL_MM_COMPAT) && \
- ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
-
-struct fault_data;
-extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
- struct fault_data *data);
-#endif
#ifndef DRM_FULL_MM_COMPAT
/*
- * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
* For now, just return a dummy page that we've allocated out of
* static space. The page will be put by do_nopage() since we've already
* filled out the pte.
@@ -232,15 +230,12 @@ struct fault_data {
int type;
};
-
-extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn);
-
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type);
-#else
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
+ !defined(DRM_FULL_MM_COMPAT)
extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long address);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
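The list_for_each_entry_safe_reverse() compat macro added above is what lets drm_fence_handler() (next file) walk the fence ring newest-to-oldest while unlinking entries as it goes: the predecessor is fetched before the loop body runs, so the current entry may be freed safely. A self-contained userspace sketch of that pattern, with a trimmed-down struct list_head and a hypothetical struct item standing in for drm_fence_object_t:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry_safe_reverse(pos, n, head, member)		\
	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
	     n = list_entry(pos->member.prev, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))

struct item { int seq; struct list_head ring; };

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

int main(void)
{
	struct list_head ring = { &ring, &ring };
	struct item *pos, *n;
	int i;

	for (i = 0; i < 4; i++) {
		struct item *it = malloc(sizeof(*it));
		if (!it)
			return 1;
		it->seq = i;
		list_add_tail(&it->ring, &ring);
	}

	/* Walk newest to oldest, unlinking and freeing as we go --
	 * the same shape as retiring signaled fences from the ring. */
	list_for_each_entry_safe_reverse(pos, n, &ring, ring) {
		printf("retire seq %d\n", pos->seq);
		list_del(&pos->ring);
		free(pos);
	}
	return 0;
}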
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 3e17a16d..6dd04a35 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -43,10 +43,29 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class,
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
- struct list_head *list, *prev;
- drm_fence_object_t *fence;
+ struct list_head *head;
+ drm_fence_object_t *fence, *next;
int found = 0;
+ int is_exe = (type & DRM_FENCE_TYPE_EXE);
+ int ge_last_exe;
+
+ diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
+
+ if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
+ fc->pending_exe_flush = 0;
+
+ diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
+ ge_last_exe = diff < driver->wrap_diff;
+
+ if (ge_last_exe)
+ fc->pending_flush &= ~type;
+ if (is_exe && ge_last_exe) {
+ fc->last_exe_flush = sequence;
+ }
+
if (list_empty(&fc->ring))
return;
@@ -58,11 +77,11 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class,
}
}
- list = (found) ? fence->ring.prev : fc->ring.prev;
- prev = list->prev;
+ head = (found) ? &fence->ring : &fc->ring;
- for (; list != &fc->ring; list = prev, prev = list->prev) {
- fence = list_entry(list, drm_fence_object_t, ring);
+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
+ if (&fence->ring == &fc->ring)
+ break;
type |= fence->native_type;
relevant = type & fence->type;
@@ -90,12 +109,7 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class,
}
}
-
- fc->pending_flush &= ~type;
- if (fc->pending_exe_flush && (type & DRM_FENCE_TYPE_EXE) &&
- ((sequence - fc->exe_flush_sequence) < driver->wrap_diff))
- fc->pending_exe_flush = 0;
-
+
if (wake) {
DRM_WAKEUP(&fc->fence_queue);
}
@@ -178,24 +192,6 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
uint32_t diff;
if (!fc->pending_exe_flush) {
- struct list_head *list;
-
- /*
- * Last_exe_flush is invalid. Find oldest sequence.
- */
-
- list = &fc->ring;
- if (list_empty(list)) {
- return;
- } else {
- drm_fence_object_t *fence =
- list_entry(list->next, drm_fence_object_t, ring);
- fc->last_exe_flush = (fence->sequence - 1) &
- driver->sequence_mask;
- }
- diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
- if (diff >= driver->wrap_diff)
- return;
fc->exe_flush_sequence = sequence;
fc->pending_exe_flush = 1;
} else {
@@ -261,14 +257,24 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
drm_fence_object_t *fence;
uint32_t diff;
+ write_lock_irqsave(&fm->lock, flags);
+ old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
+ diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
+
+ if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
+ fc->pending_exe_flush = 1;
+ fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
+ }
+ write_unlock_irqrestore(&fm->lock, flags);
+
mutex_lock(&dev->struct_mutex);
read_lock_irqsave(&fm->lock, flags);
- if (fc->ring.next == &fc->ring) {
+
+ if (list_empty(&fc->ring)) {
read_unlock_irqrestore(&fm->lock, flags);
mutex_unlock(&dev->struct_mutex);
return;
}
- old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
fence = list_entry(fc->ring.next, drm_fence_object_t, ring);
atomic_inc(&fence->usage);
mutex_unlock(&dev->struct_mutex);
@@ -384,6 +390,7 @@ int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
+ drm_fence_class_manager_t *fc = &fm->class[fence->class];
unsigned long flags;
uint32_t sequence;
uint32_t native_type;
@@ -402,7 +409,9 @@ int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
fence->signaled = 0x00;
fence->sequence = sequence;
fence->native_type = native_type;
- list_add_tail(&fence->ring, &fm->class[class].ring);
+ if (list_empty(&fc->ring))
+ fc->last_exe_flush = sequence - 1;
+ list_add_tail(&fence->ring, &fc->ring);
write_unlock_irqrestore(&fm->lock, flags);
return 0;
}
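The fence rework above leans throughout on one wrap-safe ordering test: with sequence numbers confined to sequence_mask, "a is at or after b" holds iff ((a - b) & sequence_mask) < wrap_diff, where wrap_diff is typically half the sequence space. A small sketch with illustrative values showing why this survives 32-bit wraparound:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t sequence_mask = 0xffffffffU;
	const uint32_t wrap_diff = 0x80000000U;	/* half the sequence space */
	uint32_t last_exe_flush = 0xfffffffeU;	/* hardware about to wrap */
	uint32_t sequence = 2;			/* reported after the wrap */

	uint32_t diff = (sequence - last_exe_flush) & sequence_mask;
	int ge_last_exe = diff < wrap_diff;

	/* diff == 4, so sequence 2 correctly orders after 0xfffffffe */
	printf("diff=%u ge_last_exe=%d\n", diff, ge_last_exe);
	return 0;
}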
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index f3b1088f..a4a55e37 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -718,28 +718,23 @@ EXPORT_SYMBOL(drm_mmap);
* \c Pagefault method for buffer objects.
*
* \param vma Virtual memory area.
- * \param data Fault data on failure or refault.
- * \return Always NULL as we insert pfns directly.
+ * \param address Faulting user-space address.
+ * \return NOPFN_REFAULT on success (the pfn is inserted manually) or a
+ * NOPFN_ error code on failure.
*
 * It's important that pfns are inserted while holding the bo->mutex lock;
* otherwise we might race with unmap_mapping_range() which is always
* called with the bo->mutex lock held.
*
- * It's not pretty to modify the vma->vm_page_prot variable while not
- * holding the mm semaphore in write mode. However, we have it i read mode,
- * so we won't be racing with any other writers, and we only actually modify
- * it when no ptes are present so it shouldn't be a big deal.
+ * We're modifying the page attribute bits of the vma->vm_page_prot field,
+ * holding the mmap_sem in read mode only rather than in write mode.
+ * These bits are not used by the mm subsystem code, and we consider them
+ * protected by the bo->mutex lock.
*/
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) || \
- LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
#ifdef DRM_FULL_MM_COMPAT
-static
-#endif
-struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
- struct fault_data *data)
+static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+ unsigned long address)
{
- unsigned long address = data->address;
drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
@@ -750,66 +745,43 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
+	unsigned long ret = NOPFN_REFAULT;
-
- mutex_lock(&bo->mutex);
+ if (address > vma->vm_end)
+ return NOPFN_SIGBUS;
+
+ err = mutex_lock_interruptible(&bo->mutex);
+ if (err)
+ return NOPFN_REFAULT;
err = drm_bo_wait(bo, 0, 0, 0);
if (err) {
- data->type = (err == -EAGAIN) ?
- VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+ ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
goto out_unlock;
}
-
-
+
/*
* If buffer happens to be in a non-mappable location,
* move it to a mappable.
*/
-#ifdef DRM_BO_FULL_COMPAT
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
uint32_t new_mask = bo->mem.mask |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
err = drm_bo_move_buffer(bo, new_mask, 0, 0);
-
if (err) {
- data->type = (err == -EAGAIN) ?
- VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+ ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
goto out_unlock;
}
}
-#else
- if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
- unsigned long _end = jiffies + 3*DRM_HZ;
- uint32_t new_mask = bo->mem.mask |
- DRM_BO_FLAG_MAPPABLE |
- DRM_BO_FLAG_FORCE_MAPPABLE;
-
- do {
- err = drm_bo_move_buffer(bo, new_mask, 0, 0);
- } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
-
- if (err) {
- DRM_ERROR("Timeout moving buffer to mappable location.\n");
- data->type = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
- }
-#endif
-
- if (address > vma->vm_end) {
- data->type = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
dev = bo->dev;
err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
- data->type = VM_FAULT_SIGBUS;
+ ret = NOPFN_SIGBUS;
goto out_unlock;
}
@@ -826,7 +798,7 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
drm_ttm_fixup_caching(ttm);
page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
- data->type = VM_FAULT_OOM;
+ ret = NOPFN_OOM;
goto out_unlock;
}
pfn = page_to_pfn(page);
@@ -834,14 +806,13 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
}
err = vm_insert_pfn(vma, address, pfn);
-
- if (!err || err == -EBUSY)
- data->type = VM_FAULT_MINOR;
- else
- data->type = VM_FAULT_OOM;
+ if (err) {
+ ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
+ goto out_unlock;
+ }
out_unlock:
mutex_unlock(&bo->mutex);
- return NULL;
+ return ret;
}
#endif
@@ -897,7 +868,7 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
static struct vm_operations_struct drm_bo_vm_ops = {
#ifdef DRM_FULL_MM_COMPAT
- .fault = drm_bo_vm_fault,
+ .nopfn = drm_bo_vm_nopfn,
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
.nopfn = drm_bo_vm_nopfn,
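The converted handler reports failures through nopfn return codes instead of filling a fault_data type. A userspace sketch of the errno mapping it applies after drm_bo_wait() and vm_insert_pfn(): -EAGAIN means "drop the locks and take the fault again", anything else is fatal. The NOPFN_* values here are stand-ins, not the kernel's definitions:

#include <errno.h>
#include <stdio.h>

/* Stand-ins for the kernel's NOPFN_* codes (values are illustrative). */
#define NOPFN_REFAULT ((unsigned long)-1)
#define NOPFN_SIGBUS  ((unsigned long)-2)
#define NOPFN_OOM     ((unsigned long)-3)

/* Mirror of the mapping drm_bo_vm_nopfn() applies to wait errors. */
static unsigned long wait_err_to_nopfn(int err)
{
	if (!err)
		return NOPFN_REFAULT;	/* pte already inserted, just refault */
	return (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
}

int main(void)
{
	printf("ok:      %lu\n", wait_err_to_nopfn(0));
	printf("-EAGAIN: %lu\n", wait_err_to_nopfn(-EAGAIN));
	printf("-EINVAL: %lu\n", wait_err_to_nopfn(-EINVAL));
	return 0;
}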
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 81d9b176..88daa57c 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -49,6 +49,7 @@ static void i915_perform_flush(drm_device_t * dev)
uint32_t i_status;
uint32_t diff;
uint32_t sequence;
+ int rwflush;
if (!dev_priv)
return;
@@ -65,14 +66,10 @@ static void i915_perform_flush(drm_device_t * dev)
drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
}
- diff = sequence - fc->exe_flush_sequence;
- if (diff < driver->wrap_diff) {
- fc->pending_exe_flush = 0;
- if (dev_priv->fence_irq_on) {
- i915_user_irq_off(dev_priv);
- dev_priv->fence_irq_on = 0;
- }
- } else if (!dev_priv->fence_irq_on) {
+ if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
+ i915_user_irq_off(dev_priv);
+ dev_priv->fence_irq_on = 0;
+ } else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
i915_user_irq_on(dev_priv);
dev_priv->fence_irq_on = 1;
}
@@ -89,13 +86,14 @@ static void i915_perform_flush(drm_device_t * dev)
}
}
- if (fc->pending_flush && !dev_priv->flush_pending) {
+ rwflush = fc->pending_flush & DRM_I915_FENCE_TYPE_RW;
+ if (rwflush && !dev_priv->flush_pending) {
dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
dev_priv->flush_flags = fc->pending_flush;
dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
dev_priv->flush_pending = 1;
- fc->pending_flush = 0;
+ fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
}
if (dev_priv->flush_pending) {
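The i915 change above stops treating pending_flush as all-or-nothing: only the RW bits arm the MI flush, and only those bits are cleared when it is queued, so other pending flush types survive. A minimal sketch of that bookkeeping with hypothetical bit values:

#include <stdio.h>
#include <stdint.h>

#define FENCE_TYPE_EXE		(1 << 0)	/* hypothetical bit values */
#define I915_FENCE_TYPE_RW	(1 << 1)	/* stand-in for DRM_I915_FENCE_TYPE_RW */

int main(void)
{
	uint32_t pending_flush = FENCE_TYPE_EXE | I915_FENCE_TYPE_RW;
	uint32_t rwflush = pending_flush & I915_FENCE_TYPE_RW;

	if (rwflush) {
		/* ...queue the MI flush via I915REG_INSTPM here... */
		pending_flush &= ~I915_FENCE_TYPE_RW;
	}

	/* the EXE bit is left pending: prints pending_flush=0x1 */
	printf("pending_flush=0x%x\n", pending_flush);
	return 0;
}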