-rw-r--r--   linux-core/Makefile.kernel          |   2
-rw-r--r--   linux-core/drm_compat.c             |   1
-rw-r--r--   linux-core/drm_compat.h             |  19
-rw-r--r--   linux-core/drm_vm.c                 | 142
-rw-r--r--   linux-core/drm_vm_nopage_compat.c   | 267
5 files changed, 349 insertions, 82 deletions
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index f012262d..7a477a10 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -14,7 +14,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 		drm_memory_debug.o ati_pcigart.o drm_sman.o \
 		drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
 		drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
-		drm_regman.o
+		drm_regman.o drm_vm_nopage_compat.o
 tdfx-objs := tdfx_drv.o
 r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 23e5028a..3d082e7e 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -758,3 +758,4 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
 
 EXPORT_SYMBOL(kmap_atomic_prot_pfn);
 #endif
+
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 3b1287e1..30834f33 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -343,4 +343,23 @@ extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
 #endif
 
+#ifndef VM_CAN_NONLINEAR
+#define DRM_VM_NOPAGE 1
+#endif
+
+#ifdef DRM_VM_NOPAGE
+
+extern struct page *drm_vm_nopage(struct vm_area_struct *vma,
+				  unsigned long address, int *type);
+
+extern struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
+				      unsigned long address, int *type);
+
+extern struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
+				      unsigned long address, int *type);
+
+extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
+				     unsigned long address, int *type);
+#endif
+
 #endif
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index b85b4c13..6618c0ae 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -81,8 +81,9 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
 	return tmp;
 }
 
+#ifndef DRM_VM_NOPAGE
 /**
- * \c nopage method for AGP virtual memory.
+ * \c fault method for AGP virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -92,8 +93,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
  * map, get the page, increment the use count and return it.
  */
 #if __OS_HAS_AGP
-static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
-						unsigned long address)
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -105,19 +105,24 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 	 * Find the right map
 	 */
 	if (!drm_core_has_AGP(dev))
-		goto vm_nopage_error;
+		goto vm_fault_error;
 
 	if (!dev->agp || !dev->agp->cant_use_aperture)
-		goto vm_nopage_error;
+		goto vm_fault_error;
 
 	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
-		goto vm_nopage_error;
+		goto vm_fault_error;
 
 	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
 	map = r_list->map;
 
 	if (map && map->type == _DRM_AGP) {
-		unsigned long offset = address - vma->vm_start;
+		/*
+		 * Using vm_pgoff as a selector forces us to use this unusual
+		 * addressing scheme.
+		 */
+		unsigned long offset = (unsigned long)vmf->virtual_address -
+								vma->vm_start;
 		unsigned long baddr = map->offset + offset;
 		struct drm_agp_mem *agpmem;
 		struct page *page;
@@ -139,7 +144,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 		}
 
 		if (!agpmem)
-			goto vm_nopage_error;
+			goto vm_fault_error;
 
 		/*
 		 * Get the page, inc the use count, and return it
@@ -147,25 +152,21 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
 		page = virt_to_page(__va(agpmem->memory->memory[offset]));
 		get_page(page);
+		vmf->page = page;
 
-#if 0
-		/* page_count() not defined everywhere */
 		DRM_DEBUG
 		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
 		     baddr, __va(agpmem->memory->memory[offset]), offset,
 		     page_count(page));
-#endif
-
-		return page;
+		return 0;
 	}
-      vm_nopage_error:
-	return NOPAGE_SIGBUS;	/* Disallow mremap */
+vm_fault_error:
+	return VM_FAULT_SIGBUS;	/* Disallow mremap */
 }
 #else				/* __OS_HAS_AGP */
-static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
-						unsigned long address)
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	return NOPAGE_SIGBUS;
+	return VM_FAULT_SIGBUS;
 }
 #endif				/* __OS_HAS_AGP */
 
@@ -179,29 +180,28 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
  * Get the mapping, find the real physical page to map, get the page, and
  * return it.
  */
-static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
-						    unsigned long address)
+static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
 	unsigned long offset;
 	unsigned long i;
 	struct page *page;
 
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
 	if (!map)
-		return NOPAGE_SIGBUS;	/* Nothing allocated */
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = address - vma->vm_start;
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
 	i = (unsigned long)map->handle + offset;
 	page = vmalloc_to_page((void *)i);
 	if (!page)
-		return NOPAGE_SIGBUS;
+		return VM_FAULT_SIGBUS;
 	get_page(page);
+	vmf->page = page;
 
-	DRM_DEBUG("0x%lx\n", address);
-	return page;
+	DRM_DEBUG("shm_fault 0x%lx\n", offset);
+	return 0;
 }
+#endif
 
 /**
  * \c close method for shared virtual memory.
@@ -283,8 +283,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 	mutex_unlock(&dev->struct_mutex);
 }
 
+#ifndef DRM_VM_NOPAGE
 /**
- * \c nopage method for DMA virtual memory.
+ * \c fault method for DMA virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -292,8 +293,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
  *
  * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
  */
-static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
-						    unsigned long address)
+static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -303,24 +303,23 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
 	struct page *page;
 
 	if (!dma)
-		return NOPAGE_SIGBUS;	/* Error */
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
+		return VM_FAULT_SIGBUS;	/* Error */
 	if (!dma->pagelist)
-		return NOPAGE_SIGBUS;	/* Nothing allocated */
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
-	page_nr = offset >> PAGE_SHIFT;
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
+	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
 	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
 
 	get_page(page);
+	vmf->page = page;
 
-	DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
-	return page;
+	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
+	return 0;
 }
 
 /**
- * \c nopage method for scatter-gather virtual memory.
+ * \c fault method for scatter-gather virtual memory.
  *
  * \param vma virtual memory area.
  * \param address access address.
@@ -328,8 +327,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
  *
  * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
  */
-static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
-						   unsigned long address)
+static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
 	struct drm_file *priv = vma->vm_file->private_data;
@@ -340,80 +338,62 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
 	unsigned long page_offset;
 	struct page *page;
 
-	DRM_DEBUG("\n");
 	if (!entry)
-		return NOPAGE_SIGBUS;	/* Error */
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
+		return VM_FAULT_SIGBUS;	/* Error */
 	if (!entry->pagelist)
-		return NOPAGE_SIGBUS;	/* Nothing allocated */
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
 
-	offset = address - vma->vm_start;
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
 	map_offset = map->offset - (unsigned long)dev->sg->virtual;
 	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
 	page = entry->pagelist[page_offset];
 	get_page(page);
+	vmf->page = page;
 
-	return page;
-}
-
-static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-				  unsigned long address, int *type)
-{
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_nopage(vma, address);
-}
-
-static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-				      unsigned long address, int *type)
-{
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_shm_nopage(vma, address);
-}
-
-static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-				      unsigned long address, int *type)
-{
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_dma_nopage(vma, address);
-}
-
-static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-				     unsigned long address, int *type)
-{
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_sg_nopage(vma, address);
+	return 0;
 }
-
+#endif
 
 /** AGP virtual memory operations */
 static struct vm_operations_struct drm_vm_ops = {
+#ifdef DRM_VM_NOPAGE
 	.nopage = drm_vm_nopage,
+#else
+	.fault = drm_do_vm_fault,
+#endif
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 
 /** Shared virtual memory operations */
 static struct vm_operations_struct drm_vm_shm_ops = {
+#ifdef DRM_VM_NOPAGE
 	.nopage = drm_vm_shm_nopage,
+#else
+	.fault = drm_do_vm_shm_fault,
+#endif
 	.open = drm_vm_open,
 	.close = drm_vm_shm_close,
 };
 
 /** DMA virtual memory operations */
 static struct vm_operations_struct drm_vm_dma_ops = {
+#ifdef DRM_VM_NOPAGE
 	.nopage = drm_vm_dma_nopage,
+#else
+	.fault = drm_do_vm_dma_fault,
+#endif
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
 
 /** Scatter-gather virtual memory operations */
 static struct vm_operations_struct drm_vm_sg_ops = {
+#ifdef DRM_VM_NOPAGE
 	.nopage = drm_vm_sg_nopage,
+#else
+	.fault = drm_do_vm_sg_fault,
+#endif
 	.open = drm_vm_open,
 	.close = drm_vm_close,
 };
diff --git a/linux-core/drm_vm_nopage_compat.c b/linux-core/drm_vm_nopage_compat.c
new file mode 100644
index 00000000..be8d6c1c
--- /dev/null
+++ b/linux-core/drm_vm_nopage_compat.c
@@ -0,0 +1,267 @@
+/**
+ * \file drm_vm.c
+ * Memory mapping for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#ifdef DRM_VM_NOPAGE
+/**
+ * \c nopage method for AGP virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Find the right map and if it's AGP memory find the real physical page to
+ * map, get the page, increment the use count and return it.
+ */
+#if __OS_HAS_AGP
+static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
+						unsigned long address)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_map *map = NULL;
+	struct drm_map_list *r_list;
+	struct drm_hash_item *hash;
+
+	/*
+	 * Find the right map
+	 */
+	if (!drm_core_has_AGP(dev))
+		goto vm_nopage_error;
+
+	if (!dev->agp || !dev->agp->cant_use_aperture)
+		goto vm_nopage_error;
+
+	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
+		goto vm_nopage_error;
+
+	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
+	map = r_list->map;
+
+	if (map && map->type == _DRM_AGP) {
+		unsigned long offset = address - vma->vm_start;
+		unsigned long baddr = map->offset + offset;
+		struct drm_agp_mem *agpmem;
+		struct page *page;
+
+#ifdef __alpha__
+		/*
+		 * Adjust to a bus-relative address
+		 */
+		baddr -= dev->hose->mem_space->start;
+#endif
+
+		/*
+		 * It's AGP memory - find the real physical page to map
+		 */
+		list_for_each_entry(agpmem, &dev->agp->memory, head) {
+			if (agpmem->bound <= baddr &&
+			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
+				break;
+		}
+
+		if (!agpmem)
+			goto vm_nopage_error;
+
+		/*
+		 * Get the page, inc the use count, and return it
+		 */
+		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+		page = virt_to_page(__va(agpmem->memory->memory[offset]));
+		get_page(page);
+
+#if 0
+		/* page_count() not defined everywhere */
+		DRM_DEBUG
+		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+		     baddr, __va(agpmem->memory->memory[offset]), offset,
+		     page_count(page));
+#endif
+
+		return page;
+	}
+      vm_nopage_error:
+	return NOPAGE_SIGBUS;	/* Disallow mremap */
+}
+#else				/* __OS_HAS_AGP */
+static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
+						unsigned long address)
+{
+	return NOPAGE_SIGBUS;
+}
+#endif				/* __OS_HAS_AGP */
+
+/**
+ * \c nopage method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Get the mapping, find the real physical page to map, get the page, and
+ * return it.
+ */
+static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
+						    unsigned long address)
+{
+	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+	unsigned long offset;
+	unsigned long i;
+	struct page *page;
+
+	if (address > vma->vm_end)
+		return NOPAGE_SIGBUS;	/* Disallow mremap */
+	if (!map)
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
+
+	offset = address - vma->vm_start;
+	i = (unsigned long)map->handle + offset;
+	page = vmalloc_to_page((void *)i);
+	if (!page)
+		return NOPAGE_SIGBUS;
+	get_page(page);
+
+	DRM_DEBUG("0x%lx\n", address);
+	return page;
+}
+
+/**
+ * \c nopage method for DMA virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
+ */
+static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
+						    unsigned long address)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_device_dma *dma = dev->dma;
+	unsigned long offset;
+	unsigned long page_nr;
+	struct page *page;
+
+	if (!dma)
+		return NOPAGE_SIGBUS;	/* Error */
+	if (address > vma->vm_end)
+		return NOPAGE_SIGBUS;	/* Disallow mremap */
+	if (!dma->pagelist)
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
+
+	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
+	page_nr = offset >> PAGE_SHIFT;
+	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+
+	get_page(page);
+
+	DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
+	return page;
+}
+
+/**
+ * \c nopage method for scatter-gather virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
+ */
+static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
+						   unsigned long address)
+{
+	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_sg_mem *entry = dev->sg;
+	unsigned long offset;
+	unsigned long map_offset;
+	unsigned long page_offset;
+	struct page *page;
+
+	DRM_DEBUG("\n");
+	if (!entry)
+		return NOPAGE_SIGBUS;	/* Error */
+	if (address > vma->vm_end)
+		return NOPAGE_SIGBUS;	/* Disallow mremap */
+	if (!entry->pagelist)
+		return NOPAGE_SIGBUS;	/* Nothing allocated */
+
+	offset = address - vma->vm_start;
+	map_offset = map->offset - (unsigned long)dev->sg->virtual;
+	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
+	page = entry->pagelist[page_offset];
+	get_page(page);
+
+	return page;
+}
+
+
+struct page *drm_vm_nopage(struct vm_area_struct *vma,
+			   unsigned long address, int *type)
+{
+	if (type)
+		*type = VM_FAULT_MINOR;
+	return drm_do_vm_nopage(vma, address);
+}
+
+struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
+			       unsigned long address, int *type)
+{
+	if (type)
+		*type = VM_FAULT_MINOR;
+	return drm_do_vm_shm_nopage(vma, address);
+}
+
+struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
+			       unsigned long address, int *type)
+{
+	if (type)
+		*type = VM_FAULT_MINOR;
+	return drm_do_vm_dma_nopage(vma, address);
+}
+
+struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
+			      unsigned long address, int *type)
+{
+	if (type)
+		*type = VM_FAULT_MINOR;
+	return drm_do_vm_sg_nopage(vma, address);
+}
+#endif
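
For readers tracking the conversion, every hunk above applies the same pattern: the legacy nopage handler took the faulting address and returned a struct page pointer (or NOPAGE_SIGBUS), while the fault handler takes a struct vm_fault, stores the resolved page in vmf->page, and returns a VM_FAULT_* code (0 on success). The snippet below is not part of the patch; it is a minimal sketch of that shape, modelled on the shared-memory handler above and guarded by the same VM_CAN_NONLINEAR feature test that drm_compat.h uses. It assumes the 2.6.23-era struct vm_fault with a virtual_address field; the example_* names are hypothetical, and both handlers assume vma->vm_private_data points at a vmalloc()ed buffer.

/*
 * Illustrative only -- not part of this commit.  Shows the nopage and fault
 * handler shapes side by side, selected by the same VM_CAN_NONLINEAR test
 * drm_compat.h uses.  example_* names are hypothetical; both handlers assume
 * vma->vm_private_data points at a vmalloc()ed buffer.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

#ifndef VM_CAN_NONLINEAR		/* pre-2.6.23: only ->nopage exists */
static struct page *example_nopage(struct vm_area_struct *vma,
				   unsigned long address, int *type)
{
	unsigned long offset = address - vma->vm_start;
	unsigned long kva = (unsigned long)vma->vm_private_data + offset;
	struct page *page = vmalloc_to_page((void *)kva);

	if (!page)
		return NOPAGE_SIGBUS;	/* error reported via magic page value */
	get_page(page);
	if (type)
		*type = VM_FAULT_MINOR;	/* fault type passed back by pointer */
	return page;
}
#else					/* 2.6.23+: ->fault replaces ->nopage */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long offset = (unsigned long)vmf->virtual_address -
			       vma->vm_start;
	unsigned long kva = (unsigned long)vma->vm_private_data + offset;
	struct page *page = vmalloc_to_page((void *)kva);

	if (!page)
		return VM_FAULT_SIGBUS;	/* error reported via return code */
	get_page(page);
	vmf->page = page;		/* page handed back through vm_fault */
	return 0;
}
#endif

Note also that the old "if (address > vma->vm_end)" mremap guard has no counterpart on the fault side: the fault path is only invoked for addresses inside the VMA, which is presumably why this patch simply drops those checks in the converted handlers.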
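These handlers only run when user space touches a DRM mapping and the kernel has to resolve the backing page. A hypothetical user-space sketch that would exercise the shared-memory path is shown below; the map offset and size are placeholders that real clients would obtain from the DRM map ioctls rather than hard-code.

/*
 * Hypothetical user-space sketch -- not part of this commit.  The map
 * offset/size below are placeholders; real clients get them from the DRM
 * map ioctls.  The first write to the mapping faults, and the kernel
 * resolves the page through the .fault (or legacy .nopage) handler that
 * the DRM mmap code installed for this map type.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const off_t map_offset = 0;	/* placeholder map offset */
	const size_t map_size = 4096;	/* placeholder map size */
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *p = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, map_offset);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	memset(p, 0, map_size);		/* first touch triggers the fault path */

	munmap(p, map_size);
	close(fd);
	return 0;
}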