author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>   2006-10-11 13:40:35 +0200
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>   2006-10-11 13:40:35 +0200
commit     f2db76e2f206d2017f710eaddc4b33add4498898 (patch)
tree       a245512bc51f70c4458af047507605a27fae5d02 /linux-core
parent     c58574c60505a699e19e1ed59e1b441be2594e53 (diff)
Big update:
Adapt for new functions in the 2.6.19 kernel.
Remove the ability to have multiple regions in one TTM.
This simplifies a lot of code.
Remove the ability to access TTMs from user space.
We don't need it anymore without ttm regions.
Don't change the caching policy for evicted buffers. Instead, change it only
when the buffer is next accessed by the CPU (on the first page fault).
This tremendously speeds up eviction.
The current code is safe for kernels <= 2.6.14,
and should also be OK with 2.6.19 and above.
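
The deferred cache-policy switch described above is easiest to see as a tiny
state machine. The following is a minimal user-space sketch, not the patch
code itself: the function names mirror the patch (drm_evict_ttm(),
drm_fixup_ttm_caching(), drm_set_caching()) but the types and bodies are
illustrative stand-ins.

```c
#include <stdio.h>

enum ttm_state { ttm_unpopulated, ttm_unbound, ttm_bound, ttm_evicted };

struct ttm {
	enum ttm_state state;
	int uncached;	/* stand-in for the DRM_TTM_PAGE_UNCACHED flag */
};

/* In the kernel this walks every page and flushes caches and TLBs. */
static void set_caching(struct ttm *ttm, int uncached)
{
	printf("cache flip -> %s (expensive)\n",
	       uncached ? "uncached" : "cached");
	ttm->uncached = uncached;
}

/* Eviction only unbinds; it no longer touches the caching policy. */
static void evict_ttm(struct ttm *ttm)
{
	if (ttm->state == ttm_bound)
		ttm->state = ttm_evicted;
}

/* The first CPU fault after eviction pays the cost, exactly once. */
static void fixup_ttm_caching(struct ttm *ttm)
{
	if (ttm->state == ttm_evicted) {
		if (ttm->uncached)
			set_caching(ttm, 0);
		ttm->state = ttm_unbound;
	}
}

int main(void)
{
	struct ttm t = { ttm_bound, 1 };

	evict_ttm(&t);		/* fast path: no cache work at all */
	fixup_ttm_caching(&t);	/* first CPU access: one cache flip */
	fixup_ttm_caching(&t);	/* subsequent accesses: no-op */
	return 0;
}
```

A buffer that is evicted and rebound without ever being touched by the CPU
never pays for a cache-policy change, which is where the speedup comes from.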
Diffstat (limited to 'linux-core')

-rw-r--r--  linux-core/drmP.h           |   2
-rw-r--r--  linux-core/drm_agpsupport.c |   2
-rw-r--r--  linux-core/drm_bo.c         |  51
-rw-r--r--  linux-core/drm_compat.c     |  50
-rw-r--r--  linux-core/drm_compat.h     |  29
-rw-r--r--  linux-core/drm_drv.c        |   1
-rw-r--r--  linux-core/drm_ttm.c        | 649
-rw-r--r--  linux-core/drm_ttm.h        |  84
-rw-r--r--  linux-core/drm_vm.c         | 255

9 files changed, 327 insertions(+), 796 deletions(-)
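
Before wading into the diff, the heart of the new fault path may be easier to
read in isolation. The helper below is condensed from the drm_vm_ttm_fault()
hunk in drm_vm.c further down; ttm_fault_pfn() is a made-up name for
illustration, the locking, allocation, and error paths are stripped, and the
fragment will not compile outside the drm tree:

```c
/* Condensed from drm_vm_ttm_fault() below; reading aid only. */
static unsigned long ttm_fault_pfn(drm_ttm_t * ttm, unsigned long page_offset,
				   struct vm_area_struct *vma,
				   pgprot_t * pgprot)
{
	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
		/* Bound uncached: point the PTE at the AGP aperture page. */
		*pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
		return ttm->aper_offset + page_offset +
		    (ttm->be->aperture_base >> PAGE_SHIFT);
	}
	/* Unbound (or cache-coherent): map the system page, cached. */
	*pgprot = vma->vm_page_prot;
	return page_to_pfn(ttm->pages[page_offset]);
}
```

The returned pfn is then installed with vm_insert_pfn(); for kernels that do
not provide it, the patch adds a compat shim built from drm_pte_is_clear()
and io_remap_pfn_range() in drm_compat.c.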
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index bc57bd5c..1b6d94e4 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -1012,7 +1012,7 @@ typedef struct drm_buffer_object{
 	atomic_t usage;
 	drm_ttm_object_t *ttm_object;
-	drm_ttm_backend_list_t *ttm_region;
+	drm_ttm_t *ttm;
 	unsigned long num_pages;
 	unsigned long buffer_start;
 	drm_bo_type_t type;
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 2dd80162..77994d5c 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -683,6 +683,7 @@ drm_ttm_backend_t *drm_agp_init_ttm_uncached(struct drm_device *dev,
 	agp_be->unbind = drm_agp_unbind_ttm;
 	agp_be->destroy = drm_agp_destroy_ttm;
 	agp_be->needs_free = (backend == NULL);
+	agp_be->drm_map_type = _DRM_AGP;
 	return agp_be;
 }
 EXPORT_SYMBOL(drm_agp_init_ttm_uncached);
@@ -720,6 +721,7 @@ drm_ttm_backend_t *drm_agp_init_ttm_cached(struct drm_device *dev,
 	agp_be->unbind = drm_agp_unbind_ttm;
 	agp_be->destroy = drm_agp_destroy_ttm;
 	agp_be->needs_free = (backend == NULL);
+	agp_be->drm_map_type = _DRM_AGP;
 	return agp_be;
 }
 EXPORT_SYMBOL(drm_agp_init_ttm_cached);
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index d1989e49..d8cab2ad 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -63,7 +63,7 @@
  * bo locked.
  */

-static int drm_move_tt_to_local(drm_buffer_object_t * buf)
+static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict)
 {
 	drm_device_t *dev = buf->dev;
 	drm_buffer_manager_t *bm = &dev->bm;
@@ -71,7 +71,10 @@ static int drm_move_tt_to_local(drm_buffer_object_t * buf)
 	BUG_ON(!buf->tt);

 	mutex_lock(&dev->struct_mutex);
-	drm_unbind_ttm_region(buf->ttm_region);
+	if (evict)
+		drm_evict_ttm(buf->ttm);
+	else
+		drm_unbind_ttm(buf->ttm);
 	drm_mm_put_block(&bm->tt_manager, buf->tt);
 	buf->tt = NULL;

@@ -129,7 +132,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
 		 * This temporarily unlocks struct_mutex.
 		 */

-		drm_unbind_ttm_region(bo->ttm_region);
+		drm_unbind_ttm(bo->ttm);
 		drm_mm_put_block(&bm->tt_manager, bo->tt);
 		bo->tt = NULL;
 	}
@@ -137,9 +140,6 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
 		drm_mm_put_block(&bm->vram_manager, bo->vram);
 		bo->vram = NULL;
 	}
-	if (bo->ttm_region) {
-		drm_destroy_ttm_region(bo->ttm_region);
-	}
 	if (bo->ttm_object) {
 		drm_ttm_object_deref_locked(dev, bo->ttm_object);
 	}
@@ -428,7 +428,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, int tt, int no_wait)
 	}

 	if (tt) {
-		ret = drm_move_tt_to_local(bo);
+		ret = drm_move_tt_to_local(bo, 1);
 	}
 #if 0
 	else {
@@ -522,7 +522,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
 		return ret;
 	DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->tt->start);
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_bind_ttm_region(bo->ttm_region, bo->tt->start);
+	ret = drm_bind_ttm(bo->ttm, bo->tt->start);
 	if (ret) {
 		drm_mm_put_block(&bm->tt_manager, bo->tt);
 	}
@@ -530,7 +530,7 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
 	if (ret)
 		return ret;

-	be = bo->ttm_region->be;
+	be = bo->ttm->be;
 	if (be->needs_cache_adjust(be))
 		bo->flags &= ~DRM_BO_FLAG_CACHED;
 	bo->flags &= ~DRM_BO_MASK_MEM;
@@ -1023,7 +1023,7 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
 		if (ret)
 			return ret;
 	} else {
-		drm_move_tt_to_local(bo);
+		drm_move_tt_to_local(bo, 0);
 	}

 	return 0;
@@ -1203,34 +1203,24 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
  * Call bo->mutex locked.
  */

-static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo,
-			  uint32_t ttm_handle)
+static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
 {
 	drm_device_t *dev = bo->dev;
 	drm_ttm_object_t *to = NULL;
-	drm_ttm_t *ttm;
 	int ret = 0;
 	uint32_t ttm_flags = 0;

 	bo->ttm_object = NULL;
-	bo->ttm_region = NULL;
+	bo->ttm = NULL;

 	switch (bo->type) {
 	case drm_bo_type_dc:
 		mutex_lock(&dev->struct_mutex);
 		ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
+					    bo->mask & DRM_BO_FLAG_BIND_CACHED,
 					    ttm_flags, &to);
 		mutex_unlock(&dev->struct_mutex);
 		break;
-	case drm_bo_type_ttm:
-		mutex_lock(&dev->struct_mutex);
-		to = drm_lookup_ttm_object(priv, ttm_handle, 1);
-		mutex_unlock(&dev->struct_mutex);
-		if (!to) {
-			DRM_ERROR("Could not find TTM object\n");
-			ret = -EINVAL;
-		}
-		break;
 	case drm_bo_type_user:
 	case drm_bo_type_fake:
 		break;
@@ -1246,14 +1236,7 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo,

 	if (to) {
 		bo->ttm_object = to;
-		ttm = drm_ttm_from_object(to);
-		ret = drm_create_ttm_region(ttm, bo->buffer_start >> PAGE_SHIFT,
-					    bo->num_pages,
-					    bo->mask & DRM_BO_FLAG_BIND_CACHED,
-					    &bo->ttm_region);
-		if (ret) {
-			drm_ttm_object_deref_unlocked(dev, to);
-		}
+		bo->ttm = drm_ttm_from_object(to);
 	}
 	return ret;
 }
@@ -1261,7 +1244,6 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo,
 int drm_buffer_object_create(drm_file_t * priv,
 			     unsigned long size,
 			     drm_bo_type_t type,
-			     uint32_t ttm_handle,
 			     uint32_t mask,
 			     uint32_t hint,
 			     unsigned long buffer_start,
@@ -1318,7 +1300,7 @@ int drm_buffer_object_create(drm_file_t * priv,
 				1, &new_flags, &bo->mask);
 	if (ret)
 		goto out_err;

-	ret = drm_bo_add_ttm(priv, bo, ttm_handle);
+	ret = drm_bo_add_ttm(priv, bo);
 	if (ret)
 		goto out_err;

@@ -1394,7 +1376,6 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
 			rep.ret = drm_buffer_object_create(priv, req->size,
 							   req->type,
-							   req->arg_handle,
 							   req->mask,
 							   req->hint,
 							   req->buffer_start,
 							   &entry);
@@ -1659,7 +1640,7 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
 	if (arg.req.tt_p_size) {
 		ret = drm_mm_init(&bm->tt_manager,
 				  arg.req.tt_p_offset,
-				  arg.req.tt_p_size);
+				  3000 /* arg.req.tt_p_size */);
 		bm->has_tt = 1;
 		bm->use_tt = 1;
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 2b449e90..1aa835ca 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -63,8 +63,10 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 #endif
 };

-int drm_pte_is_clear(struct vm_area_struct *vma,
-		     unsigned long addr)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+
+static int drm_pte_is_clear(struct vm_area_struct *vma,
+			    unsigned long addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int ret = 1;
@@ -77,7 +79,7 @@ int drm_pte_is_clear(struct vm_area_struct *vma,
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 	spin_lock(&mm->page_table_lock);
 #else
-	spinlock_t ptl;
+	spinlock_t *ptl;
 #endif

 	pgd = pgd_offset(mm, addr);
@@ -92,7 +94,7 @@ int drm_pte_is_clear(struct vm_area_struct *vma,
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 	pte = pte_offset_map(pmd, addr);
 #else
-	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 #endif
 	if (!pte)
 		goto unlock;
@@ -108,6 +110,17 @@ int drm_pte_is_clear(struct vm_area_struct *vma,
 	return ret;
 }

+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+		  unsigned long pfn, pgprot_t pgprot)
+{
+	int ret;
+	if (!drm_pte_is_clear(vma, addr))
+		return -EBUSY;
+
+	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
+	return ret;
+}
+
 static struct {
 	spinlock_t lock;
@@ -141,3 +154,32 @@ void free_nopage_retry(void)
 		spin_unlock(&drm_np_retry.lock);
 	}
 }
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+			       unsigned long address,
+			       int *type)
+{
+	struct fault_data data;
+
+	if (type)
+		*type = VM_FAULT_MINOR;
+
+	data.address = address;
+	data.vma = vma;
+	drm_vm_ttm_fault(vma, &data);
+	switch (data.type) {
+	case VM_FAULT_OOM:
+		return NOPAGE_OOM;
+	case VM_FAULT_SIGBUS:
+		return NOPAGE_SIGBUS;
+	default:
+		break;
+	}
+
+	return NOPAGE_REFAULT;
+}
+
+#endif
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 784b9a7d..4e95679d 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -278,19 +278,30 @@ extern int drm_map_page_into_agp(struct page *page);
  * static space. The page will be put by do_nopage() since we've already
  * filled out the pte.
  */
-extern struct page * get_nopage_retry(void);
+
+struct fault_data {
+	struct vm_area_struct *vma;
+	unsigned long address;
+	pgoff_t pgoff;
+	unsigned int flags;
+
+	int type;
+};
+
+extern struct page *get_nopage_retry(void);
 extern void free_nopage_retry(void);
-#define NOPAGE_RETRY get_nopage_retry()
+#define NOPAGE_REFAULT get_nopage_retry()

-#endif
+extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+			 unsigned long pfn, pgprot_t pgprot);

-/*
- * Is the PTE for this address really clear so that we can use
- * io_remap_pfn_range?
- */
+extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+				      unsigned long address,
+				      int *type);

-int drm_pte_is_clear(struct vm_area_struct *vma,
-		     unsigned long addr);
+extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
+				     struct fault_data *data);

 #endif
+#endif
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 11228363..c7f0f485 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -120,7 +120,6 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},

 	[DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
-	[DRM_IOCTL_NR(DRM_IOCTL_TTM)] = {drm_ttm_ioctl, DRM_AUTH},
 	[DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},

 	[DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, DRM_AUTH },
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 51e28ac4..297d4f71 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -27,20 +27,6 @@
  **************************************************************************/

 #include "drmP.h"
-#include <asm/tlbflush.h>
-
-typedef struct p_mm_entry {
-	struct list_head head;
-	struct mm_struct *mm;
-	atomic_t refcount;
-} p_mm_entry_t;
-
-typedef struct drm_val_action {
-	int needs_rx_flush;
-	int evicted_tt;
-	int evicted_vram;
-	int validated;
-} drm_val_action_t;

 /*
  * Use kmalloc if possible. Otherwise fall back to vmalloc.
@@ -75,20 +61,52 @@ static void ttm_free(void *pointer, unsigned long size, int type)
  * Unmap all vma pages from vmas mapping this ttm.
  */

-static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
-			   unsigned long num_pages)
+static int unmap_vma_pages(drm_ttm_t * ttm)
 {
 	drm_device_t *dev = ttm->dev;
-	loff_t offset = ((loff_t) ttm->mapping_offset + page_offset)
-	    << PAGE_SHIFT;
-	loff_t holelen = num_pages << PAGE_SHIFT;
+	loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
+	loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;

-	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
 	return 0;
 }

 /*
+ * Change caching policy for the linear kernel map
+ * for range of pages in a ttm.
+ */
+
+static int drm_set_caching(drm_ttm_t * ttm, int noncached)
+{
+	int i;
+	struct page **cur_page;
+	int do_tlbflush = 0;
+
+	if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
+		return 0;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		cur_page = ttm->pages + i;
+		if (*cur_page) {
+			if (!PageHighMem(*cur_page)) {
+				if (noncached) {
+					map_page_into_agp(*cur_page);
+				} else {
+					unmap_page_from_agp(*cur_page);
+				}
+				do_tlbflush = 1;
+			}
+		}
+	}
+	if (do_tlbflush)
+		flush_agp_mappings();
+
+	DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
+
+	return 0;
+}
+
+/*
  * Free all resources associated with a ttm.
  */

@@ -96,8 +114,8 @@
 int drm_destroy_ttm(drm_ttm_t * ttm)
 {
 	int i;
-	struct list_head *list, *next;
 	struct page **cur_page;
+	drm_ttm_backend_t *be;

 	if (!ttm)
 		return 0;
@@ -110,30 +128,26 @@ int drm_destroy_ttm(drm_ttm_t * ttm)

 	DRM_DEBUG("Destroying a ttm\n");

-	if (ttm->be_list) {
-		list_for_each_safe(list, next, &ttm->be_list->head) {
-			drm_ttm_backend_list_t *entry =
-			    list_entry(list, drm_ttm_backend_list_t, head);
-			drm_destroy_ttm_region(entry);
-		}
+	be = ttm->be;

-		drm_free(ttm->be_list, sizeof(*ttm->be_list), DRM_MEM_TTM);
-		ttm->be_list = NULL;
+	if (be) {
+		be->destroy(be);
+		ttm->be = NULL;
 	}

 	if (ttm->pages) {
 		drm_buffer_manager_t *bm = &ttm->dev->bm;
-		int do_tlbflush = 0;
+		if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
+			drm_set_caching(ttm, 0);
+
 		for (i = 0; i < ttm->num_pages; ++i) {
 			cur_page = ttm->pages + i;
-			if (ttm->page_flags &&
-			    (ttm->page_flags[i] & DRM_TTM_PAGE_UNCACHED) &&
-			    *cur_page && !PageHighMem(*cur_page)) {
-				unmap_page_from_agp(*cur_page);
-				do_tlbflush = 1;
-			}
 			if (*cur_page) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
 				unlock_page(*cur_page);
+#else
+				ClearPageReserved(*cur_page);
+#endif
 				if (page_count(*cur_page) != 1) {
 					DRM_ERROR("Erroneous page count. "
						  "Leaking pages.\n");
@@ -151,47 +165,66 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 				--bm->cur_pages;
 			}
 		}
-		if (do_tlbflush)
-			flush_agp_mappings();
 		ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
			 DRM_MEM_TTM);
 		ttm->pages = NULL;
 	}

-	if (ttm->page_flags) {
-		ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags),
-			 DRM_MEM_TTM);
-		ttm->page_flags = NULL;
-	}
-
-	if (ttm->vma_list) {
-		list_for_each_safe(list, next, &ttm->vma_list->head) {
-			drm_ttm_vma_list_t *entry =
-			    list_entry(list, drm_ttm_vma_list_t, head);
-			list_del(list);
-			entry->vma->vm_private_data = NULL;
-			drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
-		}
-		drm_free(ttm->vma_list, sizeof(*ttm->vma_list), DRM_MEM_TTM);
-		ttm->vma_list = NULL;
-	}
-
 	drm_free(ttm, sizeof(*ttm), DRM_MEM_TTM);

 	return 0;
 }

+static int drm_ttm_populate(drm_ttm_t *ttm)
+{
+	struct page *page;
+	unsigned long i;
+	drm_buffer_manager_t *bm;
+	drm_ttm_backend_t *be;
+
+
+	if (ttm->state != ttm_unpopulated)
+		return 0;
+
+	bm = &ttm->dev->bm;
+	be = ttm->be;
+	for (i=0; i<ttm->num_pages; ++i) {
+		page = ttm->pages[i];
+		if (!page) {
+			if (bm->cur_pages >= bm->max_pages) {
+				DRM_ERROR("Maximum locked page count exceeded\n");
+				return -ENOMEM;
+			}
+			page = drm_alloc_gatt_pages(0);
+			if (!page)
+				return -ENOMEM;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+			SetPageLocked(page);
+#else
+			SetPageReserved(page);
+#endif
+			ttm->pages[i] = page;
+			++bm->cur_pages;
+		}
+	}
+	be->populate(be, ttm->num_pages, ttm->pages);
+	ttm->state = ttm_unbound;
+	return 0;
+}
+
+
+
 /*
  * Initialize a ttm.
- * FIXME: Avoid using vmalloc for the page- and page_flags tables?
  */

-static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
+static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size,
+			       int cached)
 {
-
+	drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
 	drm_ttm_t *ttm;

-	if (!dev->driver->bo_driver)
+	if (!bo_driver)
 		return NULL;

 	ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
@@ -199,21 +232,12 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
 		return NULL;

 	ttm->dev = dev;
-	ttm->lhandle = 0;
 	atomic_set(&ttm->vma_count, 0);

 	ttm->destroy = 0;
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	ttm->page_flags = ttm_alloc(ttm->num_pages * sizeof(*ttm->page_flags),
-				    DRM_MEM_TTM);
-	if (!ttm->page_flags) {
-		drm_destroy_ttm(ttm);
-		DRM_ERROR("Failed allocating page_flags table\n");
-		return NULL;
-	}
-	memset(ttm->page_flags, 0, ttm->num_pages * sizeof(*ttm->page_flags));
-
+	ttm->page_flags = 0;
 	ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
			       DRM_MEM_TTM);
 	if (!ttm->pages) {
@@ -222,382 +246,86 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
 		return NULL;
 	}
 	memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
-
-	ttm->be_list = drm_calloc(1, sizeof(*ttm->be_list), DRM_MEM_TTM);
-	if (!ttm->be_list) {
-		DRM_ERROR("Alloc be regions failed\n");
-		drm_destroy_ttm(ttm);
-		return NULL;
-	}
-
-	INIT_LIST_HEAD(&ttm->be_list->head);
-	INIT_LIST_HEAD(&ttm->p_mm_list);
-	atomic_set(&ttm->shared_count, 0);
-	ttm->mm_list_seq = 0;
-
-	ttm->vma_list = drm_calloc(1, sizeof(*ttm->vma_list), DRM_MEM_TTM);
-	if (!ttm->vma_list) {
-		DRM_ERROR("Alloc vma list failed\n");
+	ttm->be = bo_driver->create_ttm_backend_entry(dev, cached);
+	if (!ttm->be) {
 		drm_destroy_ttm(ttm);
+		DRM_ERROR("Failed creating ttm backend entry\n");
 		return NULL;
 	}
-
-	INIT_LIST_HEAD(&ttm->vma_list->head);
-
-	ttm->lhandle = (unsigned long)ttm;
-
+	ttm->state = ttm_unpopulated;
 	return ttm;
 }

 /*
- * Change caching policy for the linear kernel map
- * for range of pages in a ttm.
+ * Unbind a ttm region from the aperture.
  */

-static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
-			   unsigned long num_pages, int noncached)
+int drm_evict_ttm(drm_ttm_t * ttm)
 {
-	int i, cur;
-	struct page **cur_page;
-	int do_tlbflush = 0;
+	drm_ttm_backend_t *be = ttm->be;

-	for (i = 0; i < num_pages; ++i) {
-		cur = page_offset + i;
-		cur_page = ttm->pages + cur;
-		if (*cur_page) {
-			if (PageHighMem(*cur_page)) {
-				if (noncached
-				    && page_address(*cur_page) != NULL) {
-					DRM_ERROR
-					    ("Illegal mapped HighMem Page\n");
-					return -EINVAL;
-				}
-			} else if ((ttm->page_flags[cur] &
-				    DRM_TTM_PAGE_UNCACHED) != noncached) {
-				DRM_MASK_VAL(ttm->page_flags[cur],
-					     DRM_TTM_PAGE_UNCACHED, noncached);
-				if (noncached) {
-					map_page_into_agp(*cur_page);
-				} else {
-					unmap_page_from_agp(*cur_page);
-				}
-				do_tlbflush = 1;
-			}
+	switch (ttm->state) {
+	case ttm_bound:
+		if (be->needs_cache_adjust(be)) {
+			unmap_vma_pages(ttm);
 		}
+		be->unbind(be);
+		break;
+	default:
+		break;
 	}
-	if (do_tlbflush)
-		flush_agp_mappings();
+	ttm->state = ttm_evicted;
 	return 0;
 }

-/*
- * Unbind a ttm region from the aperture.
- */
-
-int drm_evict_ttm_region(drm_ttm_backend_list_t * entry)
+void drm_fixup_ttm_caching(drm_ttm_t * ttm)
 {
-	drm_ttm_backend_t *be = entry->be;
-	drm_ttm_t *ttm = entry->owner;

-	if (be) {
-		switch (entry->state) {
-		case ttm_bound:
-			if (ttm && be->needs_cache_adjust(be)) {
-				unmap_vma_pages(ttm, entry->page_offset,
-						entry->num_pages);
-			}
-			be->unbind(entry->be);
-			if (ttm && be->needs_cache_adjust(be)) {
-				drm_set_caching(ttm, entry->page_offset,
-						entry->num_pages, 0);
-			}
-			break;
-		default:
-			break;
+	if (ttm->state == ttm_evicted) {
+		drm_ttm_backend_t *be = ttm->be;
+		if (be->needs_cache_adjust(be)) {
+			drm_set_caching(ttm, 0);
 		}
+		ttm->state = ttm_unbound;
 	}
-	entry->state = ttm_evicted;
-	return 0;
 }
+

-void drm_unbind_ttm_region(drm_ttm_backend_list_t * entry)
+void drm_unbind_ttm(drm_ttm_t * ttm)
 {
-	drm_evict_ttm_region(entry);
-	entry->state = ttm_unbound;
-}
-
-/*
- * Destroy and clean up all resources associated with a ttm region.
- * FIXME: release pages to OS when doing this operation.
- */
-
-void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
-{
-	drm_ttm_backend_t *be = entry->be;
-	drm_ttm_t *ttm = entry->owner;
-	uint32_t *cur_page_flags;
-	int i;
-
-	DRM_DEBUG("Destroying a TTM region\n");
-	list_del_init(&entry->head);
+	if (ttm->state == ttm_bound)
+		drm_evict_ttm(ttm);

-	drm_unbind_ttm_region(entry);
-	if (be) {
-		be->clear(be);
-		be->destroy(be);
-	}
-	cur_page_flags = ttm->page_flags + entry->page_offset;
-	for (i = 0; i < entry->num_pages; ++i) {
-		DRM_MASK_VAL(*cur_page_flags, DRM_TTM_PAGE_USED, 0);
-		cur_page_flags++;
-	}
-
-	drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+	drm_fixup_ttm_caching(ttm);
 }

-/*
- * Create a ttm region from a range of ttm pages.
- */
-
-int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
-			  unsigned long n_pages, int cached,
-			  drm_ttm_backend_list_t ** region)
+int drm_bind_ttm(drm_ttm_t * ttm,
+		 unsigned long aper_offset)
 {
-	struct page **cur_page;
-	uint32_t *cur_page_flags;
-	drm_ttm_backend_list_t *entry;
-	drm_ttm_backend_t *be;
-	int ret, i;
-	drm_buffer_manager_t *bm = &ttm->dev->bm;

-	if ((page_offset + n_pages) > ttm->num_pages || n_pages == 0) {
-		DRM_ERROR("Region Doesn't fit ttm\n");
-		return -EINVAL;
-	}
-
-	cur_page_flags = ttm->page_flags + page_offset;
-	for (i = 0; i < n_pages; ++i, ++cur_page_flags) {
-		if (*cur_page_flags & DRM_TTM_PAGE_USED) {
-			DRM_ERROR("TTM region overlap\n");
-			return -EINVAL;
-		} else {
-			DRM_MASK_VAL(*cur_page_flags, DRM_TTM_PAGE_USED,
-				     DRM_TTM_PAGE_USED);
-		}
-	}
-
-	entry = drm_calloc(1, sizeof(*entry), DRM_MEM_TTM);
-	if (!entry)
-		return -ENOMEM;
-
-	be = ttm->dev->driver->bo_driver->create_ttm_backend_entry(ttm->dev,
-								   cached);
-	if (!be) {
-		drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
-		DRM_ERROR("Couldn't create backend.\n");
-		return -EINVAL;
-	}
-	entry->state = ttm_unbound;
-	entry->page_offset = page_offset;
-	entry->num_pages = n_pages;
-	entry->be = be;
-	entry->owner = ttm;
-
-	INIT_LIST_HEAD(&entry->head);
-	list_add_tail(&entry->head, &ttm->be_list->head);
-
-	for (i = 0; i < entry->num_pages; ++i) {
-		cur_page = ttm->pages + (page_offset + i);
-		if (!*cur_page) {
-			if (bm->cur_pages >= bm->max_pages) {
-				DRM_ERROR("Maximum locked page count exceeded\n");
-				drm_destroy_ttm_region(entry);
-				return -ENOMEM;
-			}
-			*cur_page = drm_alloc_gatt_pages(0);
-			if (!*cur_page) {
-				DRM_ERROR("Page allocation failed\n");
-				drm_destroy_ttm_region(entry);
-				return -ENOMEM;
-			}
-			SetPageLocked(*cur_page);
-			++bm->cur_pages;
-		}
-	}
-
-	if ((ret = be->populate(be, n_pages, ttm->pages + page_offset))) {
-		drm_destroy_ttm_region(entry);
-		DRM_ERROR("Couldn't populate backend.\n");
-		return ret;
-	}
-	ttm->aperture_base = be->aperture_base;
-
-	*region = entry;
-	return 0;
-}
-
-/*
- * Bind a ttm region. Set correct caching policy.
- */
-
-int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
-			unsigned long aper_offset)
-{
-
-	int i;
-	uint32_t *cur_page_flag;
 	int ret = 0;
 	drm_ttm_backend_t *be;
-	drm_ttm_t *ttm;

-	if (!region || region->state == ttm_bound)
+	if (!ttm)
 		return -EINVAL;
+	if (ttm->state == ttm_bound)
+		return 0;

-	be = region->be;
-	ttm = region->owner;
-
-	if (ttm && be->needs_cache_adjust(be)) {
-		if (ret)
-			return ret;
-
-		unmap_vma_pages(ttm, region->page_offset,
-				region->num_pages);
-		drm_set_caching(ttm, region->page_offset, region->num_pages,
-				DRM_TTM_PAGE_UNCACHED);
-	} else {
-		DRM_DEBUG("Binding cached\n");
-	}
-
+	be = ttm->be;
+
+	drm_ttm_populate(ttm);
+	if (ttm->state == ttm_unbound && be->needs_cache_adjust(be)) {
+		unmap_vma_pages(ttm);
+		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
+	}
 	if ((ret = be->bind(be, aper_offset))) {
-		drm_unbind_ttm_region(region);
+		drm_unbind_ttm(ttm);
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;
 	}

-	cur_page_flag = ttm->page_flags + region->page_offset;
-	for (i = 0; i < region->num_pages; ++i) {
-		DRM_MASK_VAL(*cur_page_flag, DRM_TTM_MASK_PFN,
-			     (i + aper_offset) << PAGE_SHIFT);
-		cur_page_flag++;
-	}
-
-	region->state = ttm_bound;
-	return 0;
-}
-
-int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
-			  unsigned long aper_offset)
-{
-	return drm_bind_ttm_region(entry, aper_offset);
-
-}
-
-/*
- * Destroy an anonymous ttm region.
- */
-
-void drm_user_destroy_region(drm_ttm_backend_list_t * entry)
-{
-	drm_ttm_backend_t *be;
-	struct page **cur_page;
-	int i;
-
-	if (!entry || entry->owner)
-		return;
-
-	be = entry->be;
-	if (!be) {
-		drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
-		return;
-	}
-
-	be->unbind(be);
-
-	if (entry->anon_pages) {
-		cur_page = entry->anon_pages;
-		for (i = 0; i < entry->anon_locked; ++i) {
-			if (!PageReserved(*cur_page))
-				SetPageDirty(*cur_page);
-			page_cache_release(*cur_page);
-			cur_page++;
-		}
-		ttm_free(entry->anon_pages,
-			 sizeof(*entry->anon_pages)*entry->anon_locked,
-			 DRM_MEM_TTM);
-	}
-
-	be->destroy(be);
-	drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
-	return;
-}
-
-/*
- * Create a ttm region from an arbitrary region of user pages.
- * Since this region has no backing ttm, it's owner is set to
- * null, and it is registered with the file of the caller.
- * Gets destroyed when the file is closed. We call this an
- * anonymous ttm region.
- */
-
-int drm_user_create_region(drm_device_t * dev, unsigned long start, int len,
-			   drm_ttm_backend_list_t ** entry)
-{
-	drm_ttm_backend_list_t *tmp;
-	drm_ttm_backend_t *be;
-	int ret;
-
-	if (len <= 0)
-		return -EINVAL;
-	if (!dev->driver->bo_driver->create_ttm_backend_entry)
-		return -EFAULT;
-
-	tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_TTM);
-
-	if (!tmp)
-		return -ENOMEM;
-
-	be = dev->driver->bo_driver->create_ttm_backend_entry(dev, 1);
-	tmp->be = be;
-
-	if (!be) {
-		drm_user_destroy_region(tmp);
-		return -ENOMEM;
-	}
-	if (be->needs_cache_adjust(be)) {
-		drm_user_destroy_region(tmp);
-		return -EFAULT;
-	}
-
-	tmp->anon_pages = ttm_alloc(sizeof(*(tmp->anon_pages)) * len,
-				    DRM_MEM_TTM);
-
-	if (!tmp->anon_pages) {
-		drm_user_destroy_region(tmp);
-		return -ENOMEM;
-	}
-
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, start, len, 1, 0,
-			     tmp->anon_pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret != len) {
-		drm_user_destroy_region(tmp);
-		DRM_ERROR("Could not lock %d pages. Return code was %d\n",
-			  len, ret);
-		return -EPERM;
-	}
-	tmp->anon_locked = len;
-
-	ret = be->populate(be, len, tmp->anon_pages);
-
-	if (ret) {
-		drm_user_destroy_region(tmp);
-		return ret;
-	}
-
-	tmp->state = ttm_unbound;
-	*entry = tmp;
+	ttm->aper_offset = aper_offset;
+	ttm->state = ttm_bound;

 	return 0;
 }
@@ -652,28 +380,17 @@ void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
 }

 /*
- * dev->struct_mutex locked.
- */
-static void drm_ttm_user_deref_locked(drm_file_t * priv,
-				      drm_user_object_t * base)
-{
-	drm_ttm_object_deref_locked(priv->head->dev,
-				    drm_user_object_entry(base,
-							  drm_ttm_object_t,
-							  base));
-}
-
-/*
  * Create a ttm and add it to the drm book-keeping.
  * dev->struct_mutex locked.
  */

 int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
-			  uint32_t flags, drm_ttm_object_t ** ttm_object)
+			  uint32_t flags, int cached,
+			  drm_ttm_object_t ** ttm_object)
 {
 	drm_ttm_object_t *object;
 	drm_map_list_t *list;
-	drm_map_t *map;
+	drm_local_map_t *map;
 	drm_ttm_t *ttm;

 	object = drm_calloc(1, sizeof(*object), DRM_MEM_TTM);
@@ -689,14 +406,14 @@ int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
 	}
 	map = list->map;

-	ttm = drm_init_ttm(dev, size);
+	ttm = drm_init_ttm(dev, size, cached);
 	if (!ttm) {
 		DRM_ERROR("Could not create ttm\n");
 		drm_ttm_object_remove(dev, object);
 		return -ENOMEM;
 	}

-	map->offset = ttm->lhandle;
+	map->offset = (unsigned long) ttm;
 	map->type = _DRM_TTM;
 	map->flags = _DRM_REMOVABLE;
 	map->size = ttm->num_pages * PAGE_SIZE;
@@ -725,87 +442,3 @@ int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
 	*ttm_object = object;
 	return 0;
 }
-
-drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv, uint32_t handle,
-					int check_owner)
-{
-	drm_user_object_t *uo;
-	drm_ttm_object_t *to;
-
-	uo = drm_lookup_user_object(priv, handle);
-
-	if (!uo || (uo->type != drm_ttm_type))
-		return NULL;
-
-	if (check_owner && priv != uo->owner) {
-		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
-			return NULL;
-	}
-
-	to = drm_user_object_entry(uo, drm_ttm_object_t, base);
-	atomic_inc(&to->usage);
-	return to;
-}
-
-int drm_ttm_ioctl(DRM_IOCTL_ARGS)
-{
-	DRM_DEVICE;
-	drm_ttm_arg_t arg;
-	drm_ttm_object_t *entry;
-	drm_user_object_t *uo;
-	unsigned long size;
-	int ret;
-
-	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
-
-	switch (arg.op) {
-	case drm_ttm_create:
-		mutex_lock(&dev->struct_mutex);
-		size = arg.size;
-		ret = drm_ttm_object_create(dev, size, arg.flags, &entry);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-		ret = drm_add_user_object(priv, &entry->base,
-					  arg.flags & DRM_TTM_FLAG_SHAREABLE);
-		if (ret) {
-			drm_ttm_object_remove(dev, entry);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-		entry->base.remove = drm_ttm_user_deref_locked;
-		entry->base.type = drm_ttm_type;
-		entry->base.ref_struct_locked = NULL;
-		entry->base.unref = NULL;
-		atomic_inc(&entry->usage);
-		break;
-	case drm_ttm_reference:
-		ret = drm_user_object_ref(priv, arg.handle, drm_ttm_type, &uo);
-		if (ret)
-			return ret;
-		mutex_lock(&dev->struct_mutex);
-		entry = drm_lookup_ttm_object(priv, arg.handle, 0);
-		break;
-	case drm_ttm_unreference:
-		return drm_user_object_unref(priv, arg.handle, drm_ttm_type);
-	case drm_ttm_destroy:
-		mutex_lock(&dev->struct_mutex);
-		uo = drm_lookup_user_object(priv, arg.handle);
-		if (!uo || (uo->type != drm_ttm_type) || uo->owner != priv) {
-			mutex_unlock(&dev->struct_mutex);
-			return -EINVAL;
-		}
-		ret = drm_remove_user_object(priv, uo);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-	arg.handle = entry->base.hash.key;
-	arg.user_token = entry->map_list.user_token;
-	arg.size = entry->map_list.map->size;
-	drm_ttm_object_deref_locked(dev, entry);
-	mutex_unlock(&dev->struct_mutex);
-
-	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
-	return 0;
-}
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index fcac06b5..19c1df51 100644
--- a/linux-core/drm_ttm.h
+++ b/linux-core/drm_ttm.h
@@ -48,6 +48,7 @@ typedef struct drm_ttm_backend {
 	unsigned long aperture_base;
 	void *private;
 	int needs_free;
+	uint32_t drm_map_type;
 	int (*needs_cache_adjust) (struct drm_ttm_backend * backend);
 	int (*populate) (struct drm_ttm_backend * backend,
			 unsigned long num_pages,
			 struct page ** pages);
@@ -57,61 +58,32 @@ typedef struct drm_ttm_backend {
 	void (*destroy) (struct drm_ttm_backend * backend);
 } drm_ttm_backend_t;

-#define DRM_FLUSH_READ  (0x01)
-#define DRM_FLUSH_WRITE (0x02)
-#define DRM_FLUSH_EXE   (0x04)
-
-typedef struct drm_ttm_backend_list {
-	uint32_t flags;
-	struct list_head head;
-	drm_ttm_backend_t *be;
-	unsigned page_offset;
-	unsigned num_pages;
-	struct drm_ttm *owner;
-	drm_file_t *anon_owner;
-	struct page **anon_pages;
-	int anon_locked;
-	enum {
-		ttm_bound,
-		ttm_evicted,
-		ttm_unbound
-	} state;
-} drm_ttm_backend_list_t;
-
-typedef struct drm_ttm_vma_list {
-	struct list_head head;
-	pgprot_t orig_protection;
-	struct vm_area_struct *vma;
-	drm_map_t *map;
-} drm_ttm_vma_list_t;
-
 typedef struct drm_ttm {
-	struct list_head p_mm_list;
-	atomic_t shared_count;
-	uint32_t mm_list_seq;
-	unsigned long aperture_base;
 	struct page **pages;
-	uint32_t *page_flags;
-	unsigned long lhandle;
+	uint32_t page_flags;
 	unsigned long num_pages;
-	drm_ttm_vma_list_t *vma_list;
+	unsigned long aper_offset;
+	atomic_t vma_count;
 	struct drm_device *dev;
-	drm_ttm_backend_list_t *be_list;
-	atomic_t vma_count;
-	int mmap_sem_locked;
 	int destroy;
 	uint32_t mapping_offset;
+	drm_ttm_backend_t *be;
+	enum {
+		ttm_bound,
+		ttm_evicted,
+		ttm_unbound,
+		ttm_unpopulated,
+	} state;
 } drm_ttm_t;

 typedef struct drm_ttm_object {
-	drm_user_object_t base;
 	atomic_t usage;
 	uint32_t flags;
 	drm_map_list_t map_list;
 } drm_ttm_object_t;

 extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
-				 uint32_t flags,
+				 uint32_t flags, int cached,
				 drm_ttm_object_t ** ttm_object);
 extern void drm_ttm_object_deref_locked(struct drm_device *dev,
					drm_ttm_object_t * to);
@@ -120,41 +92,18 @@ extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
 extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
					       uint32_t handle,
					       int check_owner);
-
-/*
- * Bind a part of the ttm starting at page_offset size n_pages into the GTT, at
- * aperture offset aper_offset. The region handle will be used to reference this
- * bound region in the future. Note that the region may be the whole ttm.
- * Regions should not overlap.
- * This function sets all affected pages as noncacheable and flushes cashes and TLB.
- */
-
-int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
-			  unsigned long n_pages, int cached,
-			  drm_ttm_backend_list_t ** region);
-
-int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
+extern int drm_bind_ttm(drm_ttm_t * ttm,
			unsigned long aper_offset);

-/*
- * Unbind a ttm region. Restores caching policy. Flushes caches and TLB.
- */
-
-void drm_unbind_ttm_region(drm_ttm_backend_list_t * entry);
-void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry);
+extern void drm_unbind_ttm(drm_ttm_t * ttm);

 /*
  * Evict a ttm region. Keeps Aperture caching policy.
  */

-int drm_evict_ttm_region(drm_ttm_backend_list_t * entry);
-
-/*
- * Rebind an already evicted region into a possibly new location in the aperture.
- */
+extern int drm_evict_ttm(drm_ttm_t * ttm);
+extern void drm_fixup_ttm_caching(drm_ttm_t *ttm);

-int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
-			  unsigned long aper_offset);

 /*
  * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
@@ -163,7 +112,6 @@ int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
  */

 extern int drm_destroy_ttm(drm_ttm_t * ttm);
-extern void drm_user_destroy_region(drm_ttm_backend_list_t * entry);
 extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);

 static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 5fbbaadd..45951156 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -159,120 +159,48 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 }
 #endif				/* __OS_HAS_AGP */

-
-static int drm_ttm_remap_bound_pfn(struct vm_area_struct *vma,
-				   unsigned long address,
-				   unsigned long size)
-{
-	unsigned long
-	    page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
-	unsigned long
-	    num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
-	    vma->vm_private_data;
-	drm_map_t *map = entry->map;
-	drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
-	unsigned long i, cur_pfn;
-	unsigned long start = 0;
-	unsigned long end = 0;
-	unsigned long last_pfn = 0;
-	unsigned long start_pfn = 0;
-	int bound_sequence = FALSE;
-	int ret = 0;
-	uint32_t cur_flags;
-
-	for (i=page_offset; i<page_offset + num_pages; ++i) {
-		cur_flags = ttm->page_flags[i];
-
-		if (!bound_sequence && (cur_flags & DRM_TTM_PAGE_UNCACHED)) {
-
-			start = i;
-			end = i;
-			last_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
-			start_pfn = last_pfn;
-			bound_sequence = TRUE;
-
-		} else if (bound_sequence) {
-
-			cur_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
-
-			if ( !(cur_flags & DRM_TTM_PAGE_UNCACHED) ||
-			     (cur_pfn != last_pfn + 1)) {
-
-				ret = io_remap_pfn_range(vma,
-							 vma->vm_start + (start << PAGE_SHIFT),
-							 (ttm->aperture_base >> PAGE_SHIFT)
-							 + start_pfn,
-							 (end - start + 1) << PAGE_SHIFT,
-							 drm_io_prot(_DRM_AGP, vma));
-
-				if (ret)
-					break;
-
-				bound_sequence = (cur_flags & DRM_TTM_PAGE_UNCACHED);
-				if (!bound_sequence)
-					continue;
-
-				start = i;
-				end = i;
-				last_pfn = cur_pfn;
-				start_pfn = last_pfn;
-
-			} else {
-
-				end++;
-				last_pfn = cur_pfn;
-
-			}
-		}
-	}
-
-	if (!ret && bound_sequence) {
-		ret = io_remap_pfn_range(vma,
-					 vma->vm_start + (start << PAGE_SHIFT),
-					 (ttm->aperture_base >> PAGE_SHIFT)
-					 + start_pfn,
-					 (end - start + 1) << PAGE_SHIFT,
-					 drm_io_prot(_DRM_AGP, vma));
-	}
-
-	if (ret) {
-		DRM_ERROR("Map returned %c\n", ret);
-	}
-	return ret;
-}
-
-static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
-						    unsigned long address)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+static
+#endif
+struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
+			      struct fault_data *data)
 {
-	drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
-	    vma->vm_private_data;
-	drm_map_t *map;
+	unsigned long address = data->address;
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
 	unsigned long page_offset;
 	struct page *page;
 	drm_ttm_t *ttm;
-	pgprot_t default_prot;
-	uint32_t page_flags;
 	drm_buffer_manager_t *bm;
 	drm_device_t *dev;
+	unsigned long pfn;
+	int err;
+	pgprot_t pgprot;

-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
-	if (!entry)
-		return NOPAGE_OOM;	/* Nothing allocated */
+	if (!map) {
+		data->type = VM_FAULT_OOM;
+		return NULL;
+	}
+
+	if (address > vma->vm_end) {
+		data->type = VM_FAULT_SIGBUS;
+		return NULL;
+	}

-	map = (drm_map_t *) entry->map;
 	ttm = (drm_ttm_t *) map->offset;
 	dev = ttm->dev;
+
+	/*
+	 * Perhaps retry here?
+	 */
+
 	mutex_lock(&dev->struct_mutex);
+	drm_fixup_ttm_caching(ttm);
 	bm = &dev->bm;
 	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
 	page = ttm->pages[page_offset];

-	page_flags = ttm->page_flags[page_offset];
-
 	if (!page) {
 		if (bm->cur_pages >= bm->max_pages) {
 			DRM_ERROR("Maximum locked page count exceeded\n");
@@ -281,40 +209,65 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
 		}
 		++bm->cur_pages;
 		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
-		if (page) {
-			SetPageLocked(page);
-		} else {
-			page = NOPAGE_OOM;
+		if (!page) {
+			data->type = VM_FAULT_OOM;
+			goto out;
 		}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+		SetPageLocked(page);
+#else
+		SetPageReserved(page);
+#endif
 	}

-	if (page_flags & DRM_TTM_PAGE_UNCACHED) {
+	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {

 		/*
-		 * This makes sure we don't race with another
-		 * drm_ttm_remap_bound_pfn();
+		 * FIXME: Check can't map aperture flag.
 		 */

-		if (!drm_pte_is_clear(vma, address)) {
-			page = NOPAGE_RETRY;
-			goto out1;
-		}
-
-		drm_ttm_remap_bound_pfn(vma, address, PAGE_SIZE);
-		page = NOPAGE_RETRY;
-		goto out1;
+		pfn = ttm->aper_offset + page_offset +
+		    (ttm->be->aperture_base >> PAGE_SHIFT);
+		pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+	} else {
+		pfn = page_to_pfn(page);
+		pgprot = vma->vm_page_prot;
 	}
-	get_page(page);

-      out1:
-	default_prot = vm_get_page_prot(vma->vm_flags);
-	vma->vm_page_prot = default_prot;
+	err = vm_insert_pfn(vma, address, pfn, pgprot);

+	if (!err && (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) &&
+	    ttm->num_pages > 1) {
+
+		/*
+		 * FIXME: Check can't map aperture flag.
+		 */
+
+		/*
+		 * Since we're not racing with anybody else,
+		 * we might as well populate the whole object space.
+		 * Note that we're touching vma->vm_flags with this
+		 * operation, but we are not changing them, so we should be
+		 * OK.
+		 */
+
+		BUG_ON(ttm->state == ttm_unpopulated);
+		err = io_remap_pfn_range(vma, address + PAGE_SIZE, pfn+1,
+					 (ttm->num_pages - 1) * PAGE_SIZE,
+					 pgprot);
+	}
+
+
+	if (!err || err == -EBUSY)
+		data->type = VM_FAULT_MINOR;
+	else
+		data->type = VM_FAULT_OOM;
      out:
 	mutex_unlock(&dev->struct_mutex);
-	return page;
+	return NULL;
 }
+

 /**
  * \c nopage method for shared virtual memory.
  *
@@ -547,14 +500,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
 	return drm_do_vm_sg_nopage(vma, address);
 }

-static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
-				      unsigned long address, int *type)
-{
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_ttm_nopage(vma, address);
-}
-
 #else				/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

@@ -582,13 +527,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
 	return drm_do_vm_sg_nopage(vma, address);
 }

-static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
-				      unsigned long address, int unused)
-{
-	return drm_do_vm_ttm_nopage(vma, address);
-}
-
-
 #endif

 /** AGP virtual memory operations */
@@ -619,11 +557,19 @@ static struct vm_operations_struct drm_vm_sg_ops = {
 	.close = drm_vm_close,
 };

+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 static struct vm_operations_struct drm_vm_ttm_ops = {
 	.nopage = drm_vm_ttm_nopage,
 	.open = drm_vm_ttm_open_wrapper,
 	.close = drm_vm_ttm_close,
 };
+#else
+static struct vm_operations_struct drm_vm_ttm_ops = {
+	.fault = drm_vm_ttm_fault,
+	.open = drm_vm_ttm_open_wrapper,
+	.close = drm_vm_ttm_close,
+};
+#endif

 /**
  * \c open method for shared virtual memory.
@@ -656,36 +602,17 @@ static void drm_vm_open(struct vm_area_struct *vma)

 static int drm_vm_ttm_open(struct vm_area_struct *vma)
 {
-	drm_ttm_vma_list_t *entry, *tmp_vma =
-	    (drm_ttm_vma_list_t *) vma->vm_private_data;
-	drm_map_t *map;
+	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
 	drm_ttm_t *ttm;
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
-	int ret = 0;

 	drm_vm_open(vma);
 	mutex_lock(&dev->struct_mutex);
-	entry = drm_calloc(1, sizeof(*entry), DRM_MEM_VMAS);
-	if (entry) {
-		*entry = *tmp_vma;
-		map = (drm_map_t *) entry->map;
-		ttm = (drm_ttm_t *) map->offset;
-		if (!ret) {
-			atomic_inc(&ttm->vma_count);
-			INIT_LIST_HEAD(&entry->head);
-			entry->vma = vma;
-			entry->orig_protection = vma->vm_page_prot;
-			list_add_tail(&entry->head, &ttm->vma_list->head);
-			vma->vm_private_data = (void *) entry;
-			DRM_DEBUG("Added VMA to ttm at 0x%016lx\n",
-				  (unsigned long) ttm);
-		}
-	} else {
-		ret = -ENOMEM;
-	}
+	ttm = (drm_ttm_t *) map->offset;
+	atomic_inc(&ttm->vma_count);
 	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	return 0;
 }

 static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
@@ -729,21 +656,16 @@ static void drm_vm_close(struct vm_area_struct *vma)

 static void drm_vm_ttm_close(struct vm_area_struct *vma)
 {
-	drm_ttm_vma_list_t *ttm_vma =
-	    (drm_ttm_vma_list_t *) vma->vm_private_data;
-	drm_map_t *map;
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
 	drm_ttm_t *ttm;
 	drm_device_t *dev;
 	int ret;

 	drm_vm_close(vma);
-	if (ttm_vma) {
-		map = (drm_map_t *) ttm_vma->map;
+	if (map) {
 		ttm = (drm_ttm_t *) map->offset;
 		dev = ttm->dev;
 		mutex_lock(&dev->struct_mutex);
-		list_del(&ttm_vma->head);
-		drm_free(ttm_vma, sizeof(*ttm_vma), DRM_MEM_VMAS);
 		if (atomic_dec_and_test(&ttm->vma_count)) {
 			if (ttm->destroy) {
 				ret = drm_destroy_ttm(ttm);
@@ -951,17 +873,10 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 #endif
 		break;
 	case _DRM_TTM: {
-		drm_ttm_vma_list_t tmp_vma;
-		tmp_vma.orig_protection = vma->vm_page_prot;
-		tmp_vma.map = map;
 		vma->vm_ops = &drm_vm_ttm_ops;
-		vma->vm_private_data = (void *) &tmp_vma;
+		vma->vm_private_data = (void *) map;
 		vma->vm_file = filp;
 		vma->vm_flags |= VM_RESERVED | VM_IO;
-		if (drm_ttm_remap_bound_pfn(vma,
-					    vma->vm_start,
-					    vma->vm_end - vma->vm_start))
-			return -EAGAIN;
 		if (drm_vm_ttm_open(vma))
 			return -EAGAIN;
 		return 0;
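
One loose end worth spelling out: how existing user-space mappings stay
coherent when the caching policy of a bound TTM changes. The patch shoots
down every PTE covering the object and lets drm_vm_ttm_fault() rebuild them
on demand with the new protection. The sketch below is modeled on
unmap_vma_pages() in the drm_ttm.c hunk; ttm_shoot_down_ptes() is a
hypothetical name, and the address_space pointer would come from the device
inode (dev->dev_mapping in the patch):

```c
#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Drop every user PTE covering a TTM so that the next CPU access
 * faults and gets re-inserted with the current caching policy.
 */
static void ttm_shoot_down_ptes(struct address_space *dev_mapping,
				unsigned long first_page,
				unsigned long num_pages)
{
	loff_t offset = (loff_t) first_page << PAGE_SHIFT;
	loff_t holelen = (loff_t) num_pages << PAGE_SHIFT;

	/* even_cows == 1: also zap private COW copies of the range. */
	unmap_mapping_range(dev_mapping, offset, holelen, 1);
}
```

Together with the fault-time drm_fixup_ttm_caching(), this is what allows
eviction to skip the cache flip entirely for buffers the CPU never touches
again.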