From 30703893674b3da5b862dee2acd6efca13424398 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Wed, 11 Oct 2006 22:21:01 +0200
Subject: Compatibility code for 2.6.15-2.6.18.

It is ugly, but a little comfort is that it will go away in the
mainstream kernel.

Some bugfixes, mainly in error paths.
---
 linux-core/drm_compat.c | 236 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 236 insertions(+)

diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 1aa835ca..5287614d 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -183,3 +183,239 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
 }
 #endif
+
+#ifdef DRM_ODD_MM_COMPAT
+
+typedef struct p_mm_entry {
+	struct list_head head;
+	struct mm_struct *mm;
+	atomic_t refcount;
+	int locked;
+} p_mm_entry_t;
+
+typedef struct vma_entry {
+	struct list_head head;
+	struct vm_area_struct *vma;
+} vma_entry_t;
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+			       unsigned long address,
+			       int *type)
+{
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+	unsigned long page_offset;
+	struct page *page;
+	drm_ttm_t *ttm;
+	drm_buffer_manager_t *bm;
+	drm_device_t *dev;
+
+	/*
+	 * FIXME: Check the can't-map-aperture flag.
+	 */
+
+	if (type)
+		*type = VM_FAULT_MINOR;
+
+	if (!map)
+		return NOPAGE_OOM;
+
+	if (address > vma->vm_end)
+		return NOPAGE_SIGBUS;
+
+	ttm = (drm_ttm_t *) map->offset;
+	dev = ttm->dev;
+	mutex_lock(&dev->struct_mutex);
+	drm_fixup_ttm_caching(ttm);
+	BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
+
+	bm = &dev->bm;
+	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+	page = ttm->pages[page_offset];
+
+	if (!page) {
+		if (bm->cur_pages >= bm->max_pages) {
+			DRM_ERROR("Maximum locked page count exceeded\n");
+			page = NOPAGE_OOM;
+			goto out;
+		}
+		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
+		if (!page) {
+			page = NOPAGE_OOM;
+			goto out;
+		}
+		++bm->cur_pages;
+		SetPageLocked(page);
+	}
+
+	get_page(page);
+  out:
+	mutex_unlock(&dev->struct_mutex);
+	return page;
+}
+
+int drm_ttm_map_bound(struct vm_area_struct *vma)
+{
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+	drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+	int ret = 0;
+
+	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
+		unsigned long pfn = ttm->aper_offset +
+			(ttm->be->aperture_base >> PAGE_SHIFT);
+		pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+
+		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
+					 vma->vm_end - vma->vm_start,
+					 pgprot);
+	}
+	return ret;
+}
+
+int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+{
+	p_mm_entry_t *entry, *n_entry;
+	vma_entry_t *v_entry;
+	drm_local_map_t *map = (drm_local_map_t *)
+		vma->vm_private_data;
+	struct mm_struct *mm = vma->vm_mm;
+
+	v_entry = drm_alloc(sizeof(*v_entry), DRM_MEM_TTM);
+	if (!v_entry) {
+		DRM_ERROR("Allocation of vma pointer entry failed\n");
+		return -ENOMEM;
+	}
+	v_entry->vma = vma;
+	map->handle = (void *) v_entry;
+	list_add_tail(&v_entry->head, &ttm->vma_list);
+
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		if (mm == entry->mm) {
+			atomic_inc(&entry->refcount);
+			return 0;
+		} else if ((unsigned long)mm < (unsigned long)entry->mm) {
+			break;
+		}
+	}
+
+	n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+	if (!n_entry) {
+		DRM_ERROR("Allocation of process mm pointer entry failed\n");
+		/* Undo the vma bookkeeping above so the caller sees a
+		 * clean failure. */
+		list_del(&v_entry->head);
+		drm_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&n_entry->head);
+	n_entry->mm = mm;
+	n_entry->locked = 0;
+	atomic_set(&n_entry->refcount, 0);
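+	/*
+	 * The search loop above either broke at the first entry whose
+	 * mm address is larger than ours, or ran to completion, in which
+	 * case &entry->head is the list head itself. In both cases,
+	 * inserting before &entry->head keeps p_mm_list sorted by mm
+	 * address.
+	 */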
+	list_add_tail(&n_entry->head, &entry->head);
+
+	return 0;
+}
+
+void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+{
+	p_mm_entry_t *entry, *n;
+	vma_entry_t *v_entry, *v_n;
+	int found = 0;
+	struct mm_struct *mm = vma->vm_mm;
+
+	list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
+		if (v_entry->vma == vma) {
+			found = 1;
+			list_del(&v_entry->head);
+			drm_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+			break;
+		}
+	}
+	BUG_ON(!found);
+
+	list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
+		if (mm == entry->mm) {
+			if (atomic_add_negative(-1, &entry->refcount)) {
+				list_del(&entry->head);
+				BUG_ON(entry->locked);
+				drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+			}
+			return;
+		}
+	}
+	BUG_ON(1);
+}
+
+int drm_ttm_lock_mm(drm_ttm_t * ttm)
+{
+	p_mm_entry_t *entry;
+	int lock_ok = 1;
+
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		BUG_ON(entry->locked);
+		if (!down_write_trylock(&entry->mm->mmap_sem)) {
+			lock_ok = 0;
+			break;
+		}
+		entry->locked = 1;
+	}
+
+	if (lock_ok)
+		return 0;
+
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		if (!entry->locked)
+			break;
+		up_write(&entry->mm->mmap_sem);
+		entry->locked = 0;
+	}
+
+	/*
+	 * Possible deadlock. Try again. Our callers should handle this
+	 * and restart.
+	 */
+
+	return -EAGAIN;
+}
+
+void drm_ttm_unlock_mm(drm_ttm_t * ttm)
+{
+	p_mm_entry_t *entry;
+
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		BUG_ON(!entry->locked);
+		up_write(&entry->mm->mmap_sem);
+		entry->locked = 0;
+	}
+}
+
+int drm_ttm_remap_bound(drm_ttm_t *ttm)
+{
+	vma_entry_t *v_entry;
+	int ret = 0;
+
+	list_for_each_entry(v_entry, &ttm->vma_list, head) {
+		ret = drm_ttm_map_bound(v_entry->vma);
+		if (ret)
+			break;
+	}
+
+	drm_ttm_unlock_mm(ttm);
+	return ret;
+}
+
+void drm_ttm_finish_unmap(drm_ttm_t *ttm)
+{
+	vma_entry_t *v_entry;
+
+	if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
+		return;
+
+	list_for_each_entry(v_entry, &ttm->vma_list, head) {
+		v_entry->vma->vm_flags &= ~VM_PFNMAP;
+	}
+	drm_ttm_unlock_mm(ttm);
+}
+
+#endif
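
A note on the locking protocol: drm_ttm_lock_mm() uses down_write_trylock()
on every mapper's mmap_sem so that two threads locking overlapping mm sets
cannot deadlock against each other; on a trylock failure it drops everything
and returns -EAGAIN, and the comment in it says callers should restart. A
minimal sketch of that restart pattern, assuming a hypothetical caller; the
wrapper name and the schedule() backoff are illustrative, not part of this
patch:

	static int drm_ttm_rebind_example(drm_ttm_t *ttm)
	{
		/*
		 * Retry until every mapper's mmap_sem is held. A trylock
		 * failure means another thread holds one of them; yield
		 * so it can make progress, then restart from scratch.
		 */
		while (drm_ttm_lock_mm(ttm) == -EAGAIN)
			schedule();

		/*
		 * With all mapping address spaces locked out, rewrite the
		 * aperture mappings. drm_ttm_remap_bound() drops the
		 * mmap_sems again via drm_ttm_unlock_mm().
		 */
		return drm_ttm_remap_bound(ttm);
	}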