From e201511a0fbeb177a9ecd7f77d177fc88c1616fb Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Tue, 22 Aug 2006 11:57:08 +0200
Subject: More ttm cleanups.

---
 linux-core/drm_ttm.c | 25 ++++++++++++++-----------
 linux-core/drm_ttm.h |  9 ++-------
 linux-core/drm_vm.c  | 24 ++++++++----------------
 3 files changed, 24 insertions(+), 34 deletions(-)

diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 493f1465..df4c312c 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -90,7 +90,7 @@ void drm_ttm_delete_mm(drm_ttm_t * ttm, struct mm_struct *mm)
 			return;
 		}
 	}
-	BUG_ON(TRUE);
+	BUG_ON(1);
 }
 
 static void drm_ttm_lock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
@@ -200,6 +200,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 		return 0;
 
 	if (atomic_read(&ttm->vma_count) > 0) {
+		ttm->destroy = 1;
 		DRM_DEBUG("VMAs are still alive. Skipping destruction.\n");
 		return -EBUSY;
 	}
@@ -260,7 +261,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
  * FIXME: Avoid using vmalloc for the page- and page_flags tables?
  */
 
-drm_ttm_t *drm_init_ttm(struct drm_device * dev, unsigned long size)
+static drm_ttm_t *drm_init_ttm(struct drm_device * dev, unsigned long size)
 {
 	drm_ttm_t *ttm;
 
@@ -274,6 +275,7 @@ drm_ttm_t *drm_init_ttm(struct drm_device * dev, unsigned long size)
 
 	ttm->lhandle = 0;
 	atomic_set(&ttm->vma_count, 0);
+	ttm->destroy = 0;
 
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	ttm->page_flags = vmalloc(ttm->num_pages * sizeof(*ttm->page_flags));
@@ -315,6 +317,7 @@ drm_ttm_t *drm_init_ttm(struct drm_device * dev, unsigned long size)
 
 	ttm->lhandle = (unsigned long)ttm;
 	ttm->dev = dev;
+
 	return ttm;
 }
 
@@ -395,9 +398,9 @@ static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
 	struct page **cur_page;
 	pgprot_t attr = (noncached) ? PAGE_KERNEL_NOCACHE : PAGE_KERNEL;
 
-	drm_ttm_lock_mm(ttm, FALSE, TRUE);
+	drm_ttm_lock_mm(ttm, 0, 1);
 	unmap_vma_pages(ttm, page_offset, num_pages);
-	drm_ttm_unlock_mm(ttm, FALSE, TRUE);
+	drm_ttm_unlock_mm(ttm, 0, 1);
 
 	for (i = 0; i < num_pages; ++i) {
 		cur = page_offset + i;
@@ -440,17 +443,17 @@ int drm_evict_ttm_region(drm_ttm_backend_list_t * entry)
 			ret = drm_ttm_lock_mmap_sem(ttm);
 			if (ret)
 				return ret;
-			drm_ttm_lock_mm(ttm, FALSE, TRUE);
+			drm_ttm_lock_mm(ttm, 0, 1);
 			unmap_vma_pages(ttm, entry->page_offset, entry->num_pages);
 			global_flush_tlb();
-			drm_ttm_unlock_mm(ttm, FALSE, TRUE);
+			drm_ttm_unlock_mm(ttm, 0, 1);
 		}
 
 		be->unbind(entry->be);
 		if (ttm && be->needs_cache_adjust(be)) {
 			drm_set_caching(ttm, entry->page_offset,
 					entry->num_pages, 0, 1);
-			drm_ttm_unlock_mm(ttm, TRUE, FALSE);
+			drm_ttm_unlock_mm(ttm, 1, 0);
 		}
 		break;
 	default:
@@ -489,7 +492,7 @@ void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
 		drm_set_caching(ttm, entry->page_offset,
 				entry->num_pages, 0, 1);
 		if (!ret)
-			drm_ttm_unlock_mm(ttm, TRUE, FALSE);
+			drm_ttm_unlock_mm(ttm, 1, 0);
 	}
 	be->destroy(be);
 }
@@ -600,14 +603,14 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
 		if (ret)
 			return ret;
 		drm_set_caching(ttm, region->page_offset, region->num_pages,
-				DRM_TTM_PAGE_UNCACHED, TRUE);
+				DRM_TTM_PAGE_UNCACHED, 1);
 	} else {
 		DRM_DEBUG("Binding cached\n");
 	}
 
 	if ((ret = be->bind(be, aper_offset))) {
 		if (ttm && be->needs_cache_adjust(be))
-			drm_ttm_unlock_mm(ttm, TRUE, FALSE);
+			drm_ttm_unlock_mm(ttm, 1, 0);
 		drm_unbind_ttm_region(region);
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;
@@ -623,7 +626,7 @@ int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
 	if (ttm && be->needs_cache_adjust(be)) {
 		ioremap_vmas(ttm, region->page_offset, region->num_pages,
 			     aper_offset);
-		drm_ttm_unlock_mm(ttm, TRUE, FALSE);
+		drm_ttm_unlock_mm(ttm, 1, 0);
 	}
 
 	region->state = ttm_bound;
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index f695fcce..ea9a8372 100644
--- a/linux-core/drm_ttm.h
+++ b/linux-core/drm_ttm.h
@@ -98,15 +98,10 @@ typedef struct drm_ttm {
 	drm_ttm_backend_list_t *be_list;
 	atomic_t vma_count;
 	int mmap_sem_locked;
+	int destroy;
 } drm_ttm_t;
 
-/*
- * Initialize a ttm. Currently the size is fixed. Currently drmAddMap calls this function
- * and creates a DRM map of type _DRM_TTM, and returns a reference to that map to the
- * caller.
- */
-
-drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size);
+int drm_add_ttm(struct drm_device * dev, unsigned size, drm_map_list_t ** maplist);
 
 /*
  * Bind a part of the ttm starting at page_offset size n_pages into the GTT, at
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 9f48f297..85b39490 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -640,7 +640,7 @@ static int drm_vm_ttm_open(struct vm_area_struct *vma) {
 	*entry = *tmp_vma;
 	map = (drm_map_t *) entry->map;
 	ttm = (drm_ttm_t *) map->offset;
-	/* ret = drm_ttm_add_mm_to_list(ttm, vma->vm_mm); */
+	ret = drm_ttm_add_mm_to_list(ttm, vma->vm_mm);
 	if (!ret) {
 		atomic_inc(&ttm->vma_count);
 		INIT_LIST_HEAD(&entry->head);
@@ -706,6 +706,7 @@ static void drm_vm_ttm_close(struct vm_area_struct *vma)
 	int found_maps;
 	struct list_head *list;
 	drm_device_t *dev;
+	int ret;
 
 	drm_vm_close(vma);
 	if (ttm_vma) {
@@ -713,24 +714,15 @@ static void drm_vm_ttm_close(struct vm_area_struct *vma)
 		ttm = (drm_ttm_t *) map->offset;
 		dev = ttm->dev;
 		mutex_lock(&dev->struct_mutex);
-		list_del(&ttm_vma->head);
-		/* drm_ttm_delete_mm(ttm, vma->vm_mm); */
+		drm_ttm_delete_mm(ttm, vma->vm_mm);
 		drm_free(ttm_vma, sizeof(*ttm_vma), DRM_MEM_VMAS);
-		atomic_dec(&ttm->vma_count);
-		found_maps = 0;
-		list = NULL;
-#if 0 /* Reimplement with vma_count */
-		list_for_each(list, &ttm->owner->ttms) {
-			r_list = list_entry(list, drm_map_list_t, head);
-			if (r_list->map == map)
-				found_maps++;
-		}
-		if (!found_maps) {
-			if (drm_destroy_ttm(ttm) != -EBUSY) {
-				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		if (atomic_dec_and_test(&ttm->vma_count)) {
+			if (ttm->destroy) {
+				ret = drm_destroy_ttm(ttm);
+				BUG_ON(ret);
+				drm_free(map, sizeof(*map), DRM_MEM_TTM);
 			}
 		}
-#endif
 		mutex_unlock(&dev->struct_mutex);
 	}
 	return;