Diffstat (limited to 'linux-core')
-rw-r--r--   linux-core/drmP.h      |  3
-rw-r--r--   linux-core/drm_bo.c    | 26
-rw-r--r--   linux-core/drm_fence.c |  7
-rw-r--r--   linux-core/drm_lock.c  |  2
-rw-r--r--   linux-core/drm_ttm.c   | 16
-rw-r--r--   linux-core/drm_vm.c    |  9
6 files changed, 45 insertions, 18 deletions
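
The bulk of this diff wires a per-device cap on locked pages into the buffer manager: drm_buffer_manager_t grows max_pages/cur_pages fields, drm_mm_init_ioctl() fills them from the ioctl's max_locked_pages request field, and the TTM allocation paths refuse to pin pages past the limit. As a rough standalone sketch of that accounting pattern (simplified names, no locking; the real code updates the counters under dev->struct_mutex):

#include <errno.h>

/* Stand-in for the cur_pages/max_pages fields added to drm_buffer_manager_t. */
struct page_budget {
        unsigned long cur_pages;   /* pages currently locked as TTM backing store */
        unsigned long max_pages;   /* limit handed in at mm_init time */
};

/* Mirrors the checks added to drm_create_ttm_region() and
 * drm_do_vm_ttm_nopage(): fail instead of pinning past the cap. */
static int budget_reserve_page(struct page_budget *b)
{
        if (b->cur_pages >= b->max_pages)
                return -ENOMEM;    /* callers log "Maximum locked page count exceeded" */
        ++b->cur_pages;
        return 0;
}

/* Mirrors the decrement added to drm_destroy_ttm() when backing pages are freed. */
static void budget_release_page(struct page_budget *b)
{
        --b->cur_pages;
}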
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index da14bdfd..835b295a 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -790,6 +790,7 @@ typedef struct drm_fence_manager{
 
 typedef struct drm_buffer_manager{
         int initialized;
+        drm_file_t *last_to_validate;
         int has_vram;
         int has_tt;
         int use_vram;
@@ -803,6 +804,8 @@ typedef struct drm_buffer_manager{
         struct list_head other;
         struct work_struct wq;
         uint32_t fence_flags;
+        unsigned long max_pages;
+        unsigned long cur_pages;
 } drm_buffer_manager_t;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 74722b1b..3a9c2313 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -95,11 +95,11 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
 
         if (bo->fence) {
                 if (!drm_fence_object_signaled(bo->fence, bo->fence_flags)) {
+                        drm_fence_object_flush(dev, bo->fence, bo->fence_flags);
                         list_add_tail(&bo->ddestroy, &bm->ddestroy);
-
-                        schedule_delayed_work(&bm->wq, 2);
-
+                        schedule_delayed_work(&bm->wq,
+                                              ((DRM_HZ/100) < 1) ? 1 : DRM_HZ/100);
                         return;
                 } else {
                         drm_fence_usage_deref_locked(dev, bo->fence);
@@ -113,7 +113,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
         list_del_init(&bo->head);
 
         if (bo->tt) {
-                drm_unbind_ttm_region(bo->ttm_region);
+                drm_unbind_ttm_region(bo->ttm_region);
                 drm_mm_put_block(&bm->tt_manager, bo->tt);
                 bo->tt = NULL;
         }
@@ -170,7 +170,7 @@ static void drm_bo_delayed_workqueue(void *data)
         drm_bo_delayed_delete(dev);
         mutex_lock(&dev->struct_mutex);
         if (!list_empty(&bm->ddestroy)) {
-                schedule_delayed_work(&bm->wq, 2);
+                schedule_delayed_work(&bm->wq, ((DRM_HZ/100) < 1) ? 1 : DRM_HZ/100);
         }
         mutex_unlock(&dev->struct_mutex);
 }
@@ -822,6 +822,11 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
 
         while (1) {
                 if (atomic_inc_and_test(&bo->mapped)) {
+                        if (no_wait && drm_bo_busy(bo)) {
+                                atomic_dec(&bo->mapped);
+                                ret = -EBUSY;
+                                goto out;
+                        }
                         ret = drm_bo_wait(bo, 0, 0, no_wait);
                         if (ret) {
                                 atomic_dec(&bo->mapped);
@@ -1174,9 +1179,8 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo,
                 bo->ttm_object = to;
                 ttm = drm_ttm_from_object(to);
                 ret = drm_create_ttm_region(ttm, bo->buffer_start >> PAGE_SHIFT,
-                                            bo->num_pages, 0,
-
-                                            /* bo->mask & DRM_BO_FLAG_BIND_CACHED,*/
+                                            bo->num_pages,
+                                            bo->mask & DRM_BO_FLAG_BIND_CACHED,
                                             &bo->ttm_region);
                 if (ret) {
                         drm_ttm_object_deref_unlocked(dev, to);
@@ -1383,6 +1387,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
                 break;
         case drm_bo_validate:
                 rep.ret = drm_bo_lock_test(dev, filp);
+
                 if (rep.ret)
                         break;
                 rep.ret =
@@ -1571,13 +1576,16 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
                 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
                 bm->initialized = 1;
-
+                bm->cur_pages = 0;
+                bm->max_pages = arg.req.max_locked_pages;
                 break;
         case mm_takedown:
                 if (drm_bo_clean_mm(dev)) {
                         DRM_ERROR("Memory manager not clean. "
                                   "Delaying takedown\n");
                 }
+                DRM_DEBUG("We have %ld still locked pages\n",
+                          bm->cur_pages);
                 break;
         default:
                 DRM_ERROR("Function not implemented yet\n");
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index eaaf7f40..df5db702 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -359,12 +359,11 @@ int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
                                             fence_signaled(dev, fence, mask, 1));
                         if (time_after_eq(jiffies, _end))
                                 break;
-                } while (ret == -EINTR && ignore_signals);
-
+                } while (ret == -EINTR && ignore_signals);
                 if (time_after_eq(jiffies, _end) && (ret != 0))
                         ret = -EBUSY;
-                return ret;
-
+                if (ret)
+                        return ((ret == -EINTR) ? -EAGAIN : ret);
         } else {
                 int signaled;
                 do {
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index 91fad8bf..69ce2291 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -270,7 +270,7 @@ int drm_lock_free(drm_device_t * dev,
                 prev = cmpxchg(lock, old, new);
         } while (prev != old);
         if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
-                DRM_ERROR("%d freed heavyweight lock held by %d\n",
+                DRM_DEBUG("%d freed heavyweight lock held by %d\n",
                           context, _DRM_LOCKING_CONTEXT(old));
                 return 1;
         }
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index a83d6401..8aba36ca 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -251,19 +251,24 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
         }
 
         if (ttm->pages) {
+                drm_buffer_manager_t *bm = &ttm->dev->bm;
+                int do_tlbflush = 0;
                 for (i = 0; i < ttm->num_pages; ++i) {
                         cur_page = ttm->pages + i;
                         if (ttm->page_flags &&
                             (ttm->page_flags[i] & DRM_TTM_PAGE_UNCACHED) &&
                             *cur_page && !PageHighMem(*cur_page)) {
                                 change_page_attr(*cur_page, 1, PAGE_KERNEL);
+                                do_tlbflush = 1;
                         }
                         if (*cur_page) {
                                 ClearPageReserved(*cur_page);
                                 __free_page(*cur_page);
+                                --bm->cur_pages;
                         }
                 }
-                global_flush_tlb();
+                if (do_tlbflush)
+                        global_flush_tlb();
                 ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
                          DRM_MEM_TTM, ttm->pages_vmalloc);
                 ttm->pages = NULL;
@@ -308,6 +313,7 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
         if (!ttm)
                 return NULL;
 
+        ttm->dev = dev;
         ttm->lhandle = 0;
         atomic_set(&ttm->vma_count, 0);
@@ -354,7 +360,6 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
         INIT_LIST_HEAD(&ttm->vma_list->head);
 
         ttm->lhandle = (unsigned long)ttm;
-        ttm->dev = dev;
 
         return ttm;
 }
@@ -562,6 +567,7 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
         drm_ttm_backend_list_t *entry;
         drm_ttm_backend_t *be;
         int ret, i;
+        drm_buffer_manager_t *bm = &ttm->dev->bm;
 
         if ((page_offset + n_pages) > ttm->num_pages || n_pages == 0) {
                 DRM_ERROR("Region Doesn't fit ttm\n");
@@ -602,6 +608,11 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
         for (i = 0; i < entry->num_pages; ++i) {
                 cur_page = ttm->pages + (page_offset + i);
                 if (!*cur_page) {
+                        if (bm->cur_pages >= bm->max_pages) {
+                                DRM_ERROR("Maximum locked page count exceeded\n");
+                                drm_destroy_ttm_region(entry);
+                                return -ENOMEM;
+                        }
                         *cur_page = alloc_page(GFP_KERNEL);
                         if (!*cur_page) {
                                 DRM_ERROR("Page allocation failed\n");
@@ -609,6 +620,7 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
                                 return -ENOMEM;
                         }
                         SetPageReserved(*cur_page);
+                        ++bm->cur_pages;
                 }
         }
 
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 69391058..e7b808cc 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -253,6 +253,7 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
         drm_ttm_t *ttm;
         pgprot_t default_prot;
         uint32_t page_flags;
+        drm_buffer_manager_t *bm;
 
         if (address > vma->vm_end)
                 return NOPAGE_SIGBUS;   /* Disallow mremap */
@@ -261,12 +262,18 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
 
         map = (drm_map_t *) entry->map;
         ttm = (drm_ttm_t *) map->offset;
+        bm = &ttm->dev->bm;
         page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
         page = ttm->pages[page_offset];
 
         page_flags = ttm->page_flags[page_offset];
 
         if (!page) {
+                if (bm->cur_pages >= bm->max_pages) {
+                        DRM_ERROR("Maximum locked page count exceeded\n");
+                        return NOPAGE_OOM;
+                }
+                ++bm->cur_pages;
                 page = ttm->pages[page_offset] =
                     alloc_page(GFP_KERNEL);
                 SetPageReserved(page);
@@ -274,8 +281,6 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
         if (!page)
                 return NOPAGE_OOM;
 
-        get_page(page);
-
         default_prot = vm_get_page_prot(vma->vm_flags);
 
         BUG_ON(page_flags & DRM_TTM_PAGE_UNCACHED);
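
Two smaller conversions are easy to miss in this diff: the delayed-destroy work is now rescheduled at roughly a 10 ms interval instead of a hard-coded 2 jiffies, and an interrupted fence wait is reported to callers as -EAGAIN rather than -EINTR. A hedged sketch of both, assuming a DRM_HZ-style ticks-per-second constant:

#include <errno.h>

#define DRM_HZ 250   /* assumed ticks-per-second; the real value comes from the kernel build */

/* The commit replaces schedule_delayed_work(&bm->wq, 2) with a ~1/100 s
 * delay, clamped so integer division can never round it down to zero. */
static unsigned long delayed_delete_interval(void)
{
        return ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100;
}

/* drm_fence_object_wait() now hides -EINTR from its callers and asks
 * them to retry the wait instead. */
static int map_wait_error(int ret)
{
        return (ret == -EINTR) ? -EAGAIN : ret;
}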