From 333c6af47a906461678b5a8b2af415936d30babc Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 1 Feb 2007 00:38:57 +0100 Subject: Protect drm_mmap against disappearing maps. The map lists and hash tables are protected using dev->struct_mutex, but drm_mmap strangely never locked this mutex. --- linux-core/drm_vm.c | 65 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 26 deletions(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 827a7bdb..b11e09f8 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -42,8 +42,7 @@ static void drm_vm_open(struct vm_area_struct *vma); static void drm_vm_close(struct vm_area_struct *vma); static void drm_vm_ttm_close(struct vm_area_struct *vma); -static int drm_vm_ttm_open(struct vm_area_struct *vma); -static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma); +static void drm_vm_ttm_open(struct vm_area_struct *vma); pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) @@ -511,13 +510,13 @@ static struct vm_operations_struct drm_vm_sg_ops = { #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) static struct vm_operations_struct drm_vm_ttm_ops = { .nopage = drm_vm_ttm_nopage, - .open = drm_vm_ttm_open_wrapper, + .open = drm_vm_ttm_open, .close = drm_vm_ttm_close, }; #else static struct vm_operations_struct drm_vm_ttm_ops = { .fault = drm_vm_ttm_fault, - .open = drm_vm_ttm_open_wrapper, + .open = drm_vm_ttm_open, .close = drm_vm_ttm_close, }; #endif @@ -530,7 +529,7 @@ static struct vm_operations_struct drm_vm_ttm_ops = { * Create a new drm_vma_entry structure as the \p vma private data entry and * add it to drm_device::vmalist. */ -static void drm_vm_open(struct vm_area_struct *vma) +static void drm_vm_open_locked(struct vm_area_struct *vma) { drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; @@ -542,36 +541,43 @@ static void drm_vm_open(struct vm_area_struct *vma) vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); if (vma_entry) { - mutex_lock(&dev->struct_mutex); vma_entry->vma = vma; vma_entry->next = dev->vmalist; vma_entry->pid = current->pid; dev->vmalist = vma_entry; - mutex_unlock(&dev->struct_mutex); } } -static int drm_vm_ttm_open(struct vm_area_struct *vma) { - - drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; - drm_ttm_t *ttm; +static void drm_vm_open(struct vm_area_struct *vma) +{ drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; - drm_vm_open(vma); mutex_lock(&dev->struct_mutex); + drm_vm_open_locked(vma); + mutex_unlock(&dev->struct_mutex); +} + +static void drm_vm_ttm_open_locked(struct vm_area_struct *vma) { + + drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; + drm_ttm_t *ttm; + + drm_vm_open_locked(vma); ttm = (drm_ttm_t *) map->offset; atomic_inc(&ttm->vma_count); #ifdef DRM_ODD_MM_COMPAT drm_ttm_add_vma(ttm, vma); #endif - mutex_unlock(&dev->struct_mutex); - return 0; } -static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma) -{ - drm_vm_ttm_open(vma); +static void drm_vm_ttm_open(struct vm_area_struct *vma) { + drm_file_t *priv = vma->vm_file->private_data; + drm_device_t *dev = priv->head->dev; + + mutex_lock(&dev->struct_mutex); + drm_vm_ttm_open_locked(vma); + mutex_unlock(&dev->struct_mutex); } /** @@ -653,7 +659,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) drm_device_dma_t *dma; unsigned long length = vma->vm_end - vma->vm_start; - lock_kernel(); dev = 
priv->head->dev; dma = dev->dma; DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", @@ -664,7 +669,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) unlock_kernel(); return -EINVAL; } - unlock_kernel(); if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) { @@ -686,7 +690,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) vma->vm_flags |= VM_RESERVED; /* Don't swap */ vma->vm_file = filp; /* Needed for drm_vm_open() */ - drm_vm_open(vma); + drm_vm_open_locked(vma); return 0; } @@ -719,7 +723,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); * according to the mapping type and remaps the pages. Finally sets the file * pointer and calls vm_open(). */ -int drm_mmap(struct file *filp, struct vm_area_struct *vma) +static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; @@ -839,12 +843,8 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_file = filp; vma->vm_flags |= VM_RESERVED | VM_IO; #ifdef DRM_ODD_MM_COMPAT - mutex_lock(&dev->struct_mutex); drm_ttm_map_bound(vma); - mutex_unlock(&dev->struct_mutex); #endif - if (drm_vm_ttm_open(vma)) - return -EAGAIN; return 0; } default: @@ -853,7 +853,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_flags |= VM_RESERVED; /* Don't swap */ vma->vm_file = filp; /* Needed for drm_vm_open() */ - drm_vm_open(vma); + drm_vm_open_locked(vma); return 0; } + +int drm_mmap(struct file *filp, struct vm_area_struct *vma) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->head->dev; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = drm_mmap_locked(filp, vma); + mutex_unlock(&dev->struct_mutex); + + return ret; +} EXPORT_SYMBOL(drm_mmap); -- cgit v1.2.3 From dd733dea3856e7ddbba7c4c3928ccaba909b4535 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 1 Feb 2007 13:19:05 +0100 Subject: Fix missing drm_vm_ttm_open_locked() call from previous commit. Honour the ttm backend can't-use-aperture flag. --- linux-core/drm_vm.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index b11e09f8..63cf6f56 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -222,11 +222,8 @@ struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, #endif } - if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) { - - /* - * FIXME: Check can't map aperture flag. - */ + if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && + !(ttm->be->flags & DRM_BE_FLAG_CMA)) { pfn = ttm->aper_offset + page_offset + (ttm->be->aperture_base >> PAGE_SHIFT); @@ -845,6 +842,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) #ifdef DRM_ODD_MM_COMPAT drm_ttm_map_bound(vma); #endif + drm_vm_ttm_open_locked(vma); return 0; } default: -- cgit v1.2.3 From c269d560e4d71448cfc9c2ea51eee3d5feafaad4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 2 Feb 2007 14:47:44 +0100 Subject: Make vm handle buffer objects instead of ttm objects. Remove ttm objects. Make vm aware of PCI memory type buffer objects. (Only works for pre 2.6.16 kernels for now).
--- linux-core/drm_vm.c | 345 +++++++++++++++++++++++++++------------------------- 1 file changed, 181 insertions(+), 164 deletions(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 63cf6f56..93d1c0b8 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -41,8 +41,9 @@ static void drm_vm_open(struct vm_area_struct *vma); static void drm_vm_close(struct vm_area_struct *vma); -static void drm_vm_ttm_close(struct vm_area_struct *vma); -static void drm_vm_ttm_open(struct vm_area_struct *vma); +static int drm_bo_mmap_locked(struct vm_area_struct *vma, + struct file *filp, + drm_local_map_t *map); pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) @@ -158,93 +159,6 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, } #endif /* __OS_HAS_AGP */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \ - LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) -static -#endif -struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, - struct fault_data *data) -{ - unsigned long address = data->address; - drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; - unsigned long page_offset; - struct page *page; - drm_ttm_t *ttm; - drm_buffer_manager_t *bm; - drm_device_t *dev; - unsigned long pfn; - int err; - pgprot_t pgprot; - - if (!map) { - data->type = VM_FAULT_OOM; - return NULL; - } - - if (address > vma->vm_end) { - data->type = VM_FAULT_SIGBUS; - return NULL; - } - - ttm = (drm_ttm_t *) map->offset; - - dev = ttm->dev; - - /* - * Perhaps retry here? - */ - - mutex_lock(&dev->struct_mutex); - drm_fixup_ttm_caching(ttm); - - bm = &dev->bm; - page_offset = (address - vma->vm_start) >> PAGE_SHIFT; - page = ttm->pages[page_offset]; - - if (!page) { - if (drm_alloc_memctl(PAGE_SIZE)) { - data->type = VM_FAULT_OOM; - goto out; - } - page = ttm->pages[page_offset] = - alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); - if (!page) { - drm_free_memctl(PAGE_SIZE); - data->type = VM_FAULT_OOM; - goto out; - } - ++bm->cur_pages; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) - SetPageLocked(page); -#else - SetPageReserved(page); -#endif - } - - if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && - !(ttm->be->flags & DRM_BE_FLAG_CMA)) { - - pfn = ttm->aper_offset + page_offset + - (ttm->be->aperture_base >> PAGE_SHIFT); - pgprot = drm_io_prot(ttm->be->drm_map_type, vma); - } else { - pfn = page_to_pfn(page); - pgprot = vma->vm_page_prot; - } - - err = vm_insert_pfn(vma, address, pfn, pgprot); - - if (!err || err == -EBUSY) - data->type = VM_FAULT_MINOR; - else - data->type = VM_FAULT_OOM; - out: - mutex_unlock(&dev->struct_mutex); - return NULL; -} -#endif - /** * \c nopage method for shared virtual memory. * @@ -504,20 +418,6 @@ static struct vm_operations_struct drm_vm_sg_ops = { .close = drm_vm_close, }; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) -static struct vm_operations_struct drm_vm_ttm_ops = { - .nopage = drm_vm_ttm_nopage, - .open = drm_vm_ttm_open, - .close = drm_vm_ttm_close, -}; -#else -static struct vm_operations_struct drm_vm_ttm_ops = { - .fault = drm_vm_ttm_fault, - .open = drm_vm_ttm_open, - .close = drm_vm_ttm_close, -}; -#endif - /** * \c open method for shared virtual memory. 
* @@ -555,28 +455,6 @@ static void drm_vm_open(struct vm_area_struct *vma) mutex_unlock(&dev->struct_mutex); } -static void drm_vm_ttm_open_locked(struct vm_area_struct *vma) { - - drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; - drm_ttm_t *ttm; - - drm_vm_open_locked(vma); - ttm = (drm_ttm_t *) map->offset; - atomic_inc(&ttm->vma_count); -#ifdef DRM_ODD_MM_COMPAT - drm_ttm_add_vma(ttm, vma); -#endif -} - -static void drm_vm_ttm_open(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - - mutex_lock(&dev->struct_mutex); - drm_vm_ttm_open_locked(vma); - mutex_unlock(&dev->struct_mutex); -} - /** * \c close method for all virtual memory types. * @@ -611,34 +489,6 @@ static void drm_vm_close(struct vm_area_struct *vma) } -static void drm_vm_ttm_close(struct vm_area_struct *vma) -{ - drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; - drm_ttm_t *ttm; - drm_device_t *dev; - int ret; - - drm_vm_close(vma); - if (map) { - ttm = (drm_ttm_t *) map->offset; - dev = ttm->dev; - mutex_lock(&dev->struct_mutex); -#ifdef DRM_ODD_MM_COMPAT - drm_ttm_delete_vma(ttm, vma); -#endif - if (atomic_dec_and_test(&ttm->vma_count)) { - if (ttm->destroy) { - ret = drm_destroy_ttm(ttm); - BUG_ON(ret); - drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM); - } - } - mutex_unlock(&dev->struct_mutex); - } - return; -} - - /** * mmap DMA memory. * @@ -834,17 +684,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) vma->vm_private_data = (void *)map; vma->vm_flags |= VM_RESERVED; break; - case _DRM_TTM: { - vma->vm_ops = &drm_vm_ttm_ops; - vma->vm_private_data = (void *) map; - vma->vm_file = filp; - vma->vm_flags |= VM_RESERVED | VM_IO; -#ifdef DRM_ODD_MM_COMPAT - drm_ttm_map_bound(vma); -#endif - drm_vm_ttm_open_locked(vma); - return 0; - } + case _DRM_TTM: + return drm_bo_mmap_locked(vma, filp, map); default: return -EINVAL; /* This should never happen. */ } @@ -868,3 +709,179 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) return ret; } EXPORT_SYMBOL(drm_mmap); + +/** + * buffer object vm functions. + */ + +/** + * \c Pagefault method for buffer objects. + * + * \param vma Virtual memory area. + * \param data Fault data on failure or refault. + * \return Always NULL as we insert pfns directly. 
+ */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) +static +#endif +struct page *drm_bo_vm_fault(struct vm_area_struct *vma, + struct fault_data *data) +{ + unsigned long address = data->address; + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + drm_local_map_t *map; + unsigned long page_offset; + struct page *page = NULL; + drm_ttm_t *ttm; + drm_buffer_manager_t *bm; + drm_device_t *dev; + unsigned long pfn; + int err; + pgprot_t pgprot; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + + + mutex_lock(&bo->mutex); + map = bo->map_list.map; + + if (!map) { + data->type = VM_FAULT_OOM; + goto out_unlock; + } + + if (address > vma->vm_end) { + data->type = VM_FAULT_SIGBUS; + goto out_unlock; + } + + dev = bo->dev; + err = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + + if (err) { + data->type = VM_FAULT_SIGBUS; + goto out_unlock; + } + + page_offset = (address - vma->vm_start) >> PAGE_SHIFT; + + if (bus_size) { + pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; + pgprot = drm_io_prot(_DRM_AGP, vma); + } else { + bm = &dev->bm; + ttm = bo->ttm; + + page = ttm->pages[page_offset]; + if (!page) { + page = drm_ttm_alloc_page(); + if (!page) { + data->type = VM_FAULT_OOM; + goto out_unlock; + } + ttm->pages[page_offset] = page; + ++bm->cur_pages; + } + pfn = page_to_pfn(page); + pgprot = vma->vm_page_prot; + } + + err = vm_insert_pfn(vma, address, pfn, pgprot); + + if (!err || err == -EBUSY) + data->type = VM_FAULT_MINOR; + else + data->type = VM_FAULT_OOM; +out_unlock: + mutex_unlock(&bo->mutex); + return NULL; +} +#endif + +static void drm_bo_vm_open_locked(struct vm_area_struct *vma) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + + drm_vm_open_locked(vma); + atomic_inc(&bo->usage); +#ifdef DRM_MM_ODD_COMPAT + drm_bo_vm_add_vma(bo, vma); +#endif +} + +/** + * \c vma open method for buffer objects. + * + * \param vma virtual memory area. + */ + +static void drm_bo_vm_open(struct vm_area_struct *vma) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + drm_device_t *dev = bo->dev; + + mutex_lock(&dev->struct_mutex); + drm_bo_vm_open_locked(vma); + mutex_unlock(&dev->struct_mutex); +} + +/** + * \c vma close method for buffer objects. + * + * \param vma virtual memory area. + */ + +static void drm_bo_vm_close(struct vm_area_struct *vma) +{ + drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + drm_device_t *dev = bo->dev; + + drm_vm_close(vma); + if (bo) { + mutex_lock(&dev->struct_mutex); +#ifdef DRM_MM_ODD_COMPAT + drm_bo_vm_delete_vma(bo, vma); +#endif + drm_bo_usage_deref_locked(bo); + mutex_unlock(&dev->struct_mutex); + } + return; +} + +static struct vm_operations_struct drm_bo_vm_ops = { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) + .nopage = drm_bo_vm_nopage, +#else + .fault = drm_bo_vm_fault, +#endif + .open = drm_bo_vm_open, + .close = drm_bo_vm_close, +}; + +/** + * mmap buffer object memory. + * + * \param vma virtual memory area. + * \param filp file pointer. + * \param map The buffer object drm map. + * \return zero on success or a negative number on failure. 
+ */ + +int drm_bo_mmap_locked(struct vm_area_struct *vma, + struct file *filp, + drm_local_map_t *map) +{ + vma->vm_ops = &drm_bo_vm_ops; + vma->vm_private_data = map->handle; + vma->vm_file = filp; + vma->vm_flags |= VM_RESERVED | VM_IO; + drm_bo_vm_open_locked(vma); +#ifdef DRM_ODD_MM_COMPAT + drm_ttm_map_bound(vma); +#endif + return 0; +} -- cgit v1.2.3 From 63f2abd721c40f1cddae555c79b4ab4c55aae006 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 2 Feb 2007 19:49:11 +0100 Subject: Make also later kernels work with buffer object vm and clean up some function names. --- linux-core/drm_vm.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 93d1c0b8..a4a9b09d 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -747,12 +747,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, mutex_lock(&bo->mutex); - map = bo->map_list.map; - - if (!map) { - data->type = VM_FAULT_OOM; - goto out_unlock; - } if (address > vma->vm_end) { data->type = VM_FAULT_SIGBUS; @@ -808,8 +802,8 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma) drm_vm_open_locked(vma); atomic_inc(&bo->usage); -#ifdef DRM_MM_ODD_COMPAT - drm_bo_vm_add_vma(bo, vma); +#ifdef DRM_ODD_MM_COMPAT + drm_bo_add_vma(bo, vma); #endif } @@ -843,8 +837,8 @@ static void drm_bo_vm_close(struct vm_area_struct *vma) drm_vm_close(vma); if (bo) { mutex_lock(&dev->struct_mutex); -#ifdef DRM_MM_ODD_COMPAT - drm_bo_vm_delete_vma(bo, vma); +#ifdef DRM_ODD_MM_COMPAT + drm_bo_delete_vma(bo, vma); #endif drm_bo_usage_deref_locked(bo); mutex_unlock(&dev->struct_mutex); @@ -881,7 +875,7 @@ int drm_bo_mmap_locked(struct vm_area_struct *vma, vma->vm_flags |= VM_RESERVED | VM_IO; drm_bo_vm_open_locked(vma); #ifdef DRM_ODD_MM_COMPAT - drm_ttm_map_bound(vma); + drm_bo_map_bound(vma); #endif return 0; } -- cgit v1.2.3 From 71b9e876f99db219fcbf4e3ab977b64b068cc2b4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 6 Feb 2007 16:59:45 +0100 Subject: Simplify pci map vs no pci map choice. --- linux-core/drm_vm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index a4a9b09d..843fc362 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -754,7 +754,8 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, } dev = bo->dev; - err = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size); + err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, + &bus_size); if (err) { data->type = VM_FAULT_SIGBUS; @@ -770,6 +771,7 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, bm = &dev->bm; ttm = bo->ttm; + drm_ttm_fixup_caching(ttm); page = ttm->pages[page_offset]; if (!page) { page = drm_ttm_alloc_page(); -- cgit v1.2.3 From af24465b2eddfcc5296edc830ea5ed86065a4abd Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 7 Feb 2007 12:52:23 +0100 Subject: Fix a stray unlock_kernel() in drm_vm.c Add a file for memory move helpers, drm_bo_move.c Implement generic memory move. Cached, no_move and unmapped memory temporarily broken. 
--- linux-core/drm_vm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 843fc362..416ac4ae 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -513,7 +513,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) /* Length must match exact page count */ if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { - unlock_kernel(); return -EINVAL; } @@ -588,6 +587,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) * the AGP mapped at physical address 0 * --BenH. */ + if (!vma->vm_pgoff #if __OS_HAS_AGP && (!dev->agp -- cgit v1.2.3 From 1257907fa9a24de7aa95485e1b3ab509fdc4d4e6 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 13:29:08 +0100 Subject: Simplify external ttm page allocation. Implement a memcpy fallback for copying between buffers. --- linux-core/drm_vm.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 416ac4ae..25779eca 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -736,7 +736,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, unsigned long page_offset; struct page *page = NULL; drm_ttm_t *ttm; - drm_buffer_manager_t *bm; drm_device_t *dev; unsigned long pfn; int err; @@ -768,19 +767,13 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; pgprot = drm_io_prot(_DRM_AGP, vma); } else { - bm = &dev->bm; ttm = bo->ttm; drm_ttm_fixup_caching(ttm); - page = ttm->pages[page_offset]; + page = drm_ttm_get_page(ttm, page_offset); if (!page) { - page = drm_ttm_alloc_page(); - if (!page) { - data->type = VM_FAULT_OOM; - goto out_unlock; - } - ttm->pages[page_offset] = page; - ++bm->cur_pages; + data->type = VM_FAULT_OOM; + goto out_unlock; } pfn = page_to_pfn(page); pgprot = vma->vm_page_prot; -- cgit v1.2.3 From bf8f46d4c64eb5b66814223f7e5ddb8d8e7a555e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 18:59:02 +0100 Subject: Fix mm_block leak. Some other minor fixes. --- linux-core/drm_vm.c | 1 - 1 file changed, 1 deletion(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 25779eca..5afa9800 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -732,7 +732,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, { unsigned long address = data->address; drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; - drm_local_map_t *map; unsigned long page_offset; struct page *page = NULL; drm_ttm_t *ttm; -- cgit v1.2.3 From 6a49d9a8abd9f168211017c2d585d0d64e89c530 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 00:02:02 +0100 Subject: Fix evict_mutex locking range. Implement unmappable buffers. (fault moves them to mappable when needed). Various bugfixes. --- linux-core/drm_vm.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 5afa9800..4a41e761 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -746,6 +746,30 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, mutex_lock(&bo->mutex); + /* + * If buffer happens to be in a non-mappable location, + * move it to a mappable. 
+ */ + + if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { + uint32_t mask_save = bo->mem.mask; + uint32_t new_mask = bo->mem.mask | + DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_FORCE_MAPPABLE; + + err = drm_bo_move_buffer(bo, new_mask, 0, 0); + bo->mem.mask = mask_save; + + if (!err) + err = drm_bo_wait(bo, 0, 0, 0); + + if (err) { + data->type = (err == -EAGAIN) ? + VM_FAULT_MINOR : VM_FAULT_SIGBUS; + goto out_unlock; + } + } + if (address > vma->vm_end) { data->type = VM_FAULT_SIGBUS; goto out_unlock; -- cgit v1.2.3 From 85ee2a8d044cd4d8de4894a794151af9471648e3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 10 Feb 2007 12:06:36 +0100 Subject: Various bugfixes. --- linux-core/drm_vm.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 4a41e761..17778c26 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -746,6 +746,14 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, mutex_lock(&bo->mutex); + err = drm_bo_wait(bo, 0, 0, 0); + if (err) { + data->type = (err == -EAGAIN) ? + VM_FAULT_MINOR : VM_FAULT_SIGBUS; + goto out_unlock; + } + + /* * If buffer happens to be in a non-mappable location, * move it to a mappable. @@ -760,16 +768,13 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, err = drm_bo_move_buffer(bo, new_mask, 0, 0); bo->mem.mask = mask_save; - if (!err) - err = drm_bo_wait(bo, 0, 0, 0); - if (err) { data->type = (err == -EAGAIN) ? VM_FAULT_MINOR : VM_FAULT_SIGBUS; goto out_unlock; } } - + if (address > vma->vm_end) { data->type = VM_FAULT_SIGBUS; goto out_unlock; -- cgit v1.2.3 From 7bcb62b45d18ab7b48ad3cb5d13aec3bc577678e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 14 Feb 2007 10:49:37 +0100 Subject: Rework buffer object vm code to use nopfn() for kernels >= 2.6.19. --- linux-core/drm_vm.c | 55 +++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 43 insertions(+), 12 deletions(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 17778c26..4a340b57 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -720,11 +720,20 @@ EXPORT_SYMBOL(drm_mmap); * \param vma Virtual memory area. * \param data Fault data on failure or refault. * \return Always NULL as we insert pfns directly. + * + * It's important that pfns are inserted while holding the bo->mutex lock. + * otherwise we might race with unmap_mapping_range() which is always + * called with the bo->mutex lock held. + * + * It's not pretty to modify the vma->vm_page_prot variable while not + * holding the mm semaphore in write mode. However, we have it i read mode, + * so we won't be racing with any other writers, and we only actually modify + * it when no ptes are present so it shouldn't be a big deal. */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) || \ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) +#ifdef DRM_FULL_MM_COMPAT static #endif struct page *drm_bo_vm_fault(struct vm_area_struct *vma, @@ -738,7 +747,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, drm_device_t *dev; unsigned long pfn; int err; - pgprot_t pgprot; unsigned long bus_base; unsigned long bus_offset; unsigned long bus_size; @@ -759,14 +767,12 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, * move it to a mappable. 
*/ +#ifdef DRM_BO_FULL_COMPAT if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { - uint32_t mask_save = bo->mem.mask; uint32_t new_mask = bo->mem.mask | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_FORCE_MAPPABLE; - err = drm_bo_move_buffer(bo, new_mask, 0, 0); - bo->mem.mask = mask_save; if (err) { data->type = (err == -EAGAIN) ? @@ -774,6 +780,24 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, goto out_unlock; } } +#else + if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { + unsigned long _end = jiffies + 3*DRM_HZ; + uint32_t new_mask = bo->mem.mask | + DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_FORCE_MAPPABLE; + + do { + err = drm_bo_move_buffer(bo, new_mask, 0, 0); + } while((err == -EAGAIN) && !time_after_eq(jiffies, _end)); + + if (err) { + DRM_ERROR("Timeout moving buffer to mappable location.\n"); + data->type = VM_FAULT_SIGBUS; + goto out_unlock; + } + } +#endif if (address > vma->vm_end) { data->type = VM_FAULT_SIGBUS; @@ -793,7 +817,7 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, if (bus_size) { pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; - pgprot = drm_io_prot(_DRM_AGP, vma); + vma->vm_page_prot = drm_io_prot(_DRM_AGP, vma); } else { ttm = bo->ttm; @@ -804,10 +828,10 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, goto out_unlock; } pfn = page_to_pfn(page); - pgprot = vma->vm_page_prot; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); } - err = vm_insert_pfn(vma, address, pfn, pgprot); + err = vm_insert_pfn(vma, address, pfn); if (!err || err == -EBUSY) data->type = VM_FAULT_MINOR; @@ -870,10 +894,14 @@ static void drm_bo_vm_close(struct vm_area_struct *vma) } static struct vm_operations_struct drm_bo_vm_ops = { -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) - .nopage = drm_bo_vm_nopage, -#else +#ifdef DRM_FULL_MM_COMPAT .fault = drm_bo_vm_fault, +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) + .nopfn = drm_bo_vm_nopfn, +#else + .nopage = drm_bo_vm_nopage, +#endif #endif .open = drm_bo_vm_open, .close = drm_bo_vm_close, @@ -896,6 +924,9 @@ int drm_bo_mmap_locked(struct vm_area_struct *vma, vma->vm_private_data = map->handle; vma->vm_file = filp; vma->vm_flags |= VM_RESERVED | VM_IO; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) + vma->vm_flags |= VM_PFNMAP; +#endif drm_bo_vm_open_locked(vma); #ifdef DRM_ODD_MM_COMPAT drm_bo_map_bound(vma); -- cgit v1.2.3 From 04760563b88c8e94f3ae448710d1ab8b350c2e5f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 14 Feb 2007 12:39:02 +0100 Subject: Set the drm bus map type for each buffer object memory type. --- linux-core/drm_vm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'linux-core/drm_vm.c') diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 4a340b57..f3b1088f 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -816,8 +816,10 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { + drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; - vma->vm_page_prot = drm_io_prot(_DRM_AGP, vma); + vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); } else { ttm = bo->ttm; -- cgit v1.2.3
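Taken together, the series above leaves drm_vm.c with dev->struct_mutex taken in thin mmap and vma open/close wrappers, and with buffer-object faults resolved under bo->mutex to either an aperture pfn or a ttm page. The following sketch is a simplified reconstruction of that final fault path, intended only for orientation: it assumes a DRM_FULL_MM_COMPAT kernel, reuses identifiers that appear in the hunks above (drm_bo_wait, drm_bo_move_buffer, drm_bo_pci_offset, drm_ttm_fixup_caching, drm_ttm_get_page, drm_io_prot, vm_insert_pfn), and abbreviates the error handling, so it is not the verbatim final drm_vm.c.

static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
				    struct fault_data *data)
{
	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
	unsigned long address = data->address;
	unsigned long bus_base, bus_offset, bus_size;
	unsigned long page_offset, pfn;
	int err;

	mutex_lock(&bo->mutex);

	/* Wait for outstanding GPU access before touching the pages. */
	err = drm_bo_wait(bo, 0, 0, 0);
	if (err)
		goto sigbus;

	/* If the buffer sits in an unmappable location, move it first. */
	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
		err = drm_bo_move_buffer(bo, bo->mem.mask |
					 DRM_BO_FLAG_MAPPABLE |
					 DRM_BO_FLAG_FORCE_MAPPABLE, 0, 0);
		if (err)
			goto sigbus;
	}

	if (address > vma->vm_end)
		goto sigbus;

	/* Decide between an aperture pfn and a system page held by the ttm. */
	err = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);
	if (err)
		goto sigbus;

	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

	if (bus_size) {			/* PCI / AGP aperture backed */
		drm_mem_type_manager_t *man =
			&bo->dev->bm.man[bo->mem.mem_type];

		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
	} else {			/* system memory backed by the ttm */
		struct page *page;

		drm_ttm_fixup_caching(bo->ttm);
		page = drm_ttm_get_page(bo->ttm, page_offset);
		if (!page) {
			data->type = VM_FAULT_OOM;
			goto out_unlock;
		}
		pfn = page_to_pfn(page);
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	/* Insert the pfn while bo->mutex is still held. */
	err = vm_insert_pfn(vma, address, pfn);
	data->type = (!err || err == -EBUSY) ? VM_FAULT_MINOR : VM_FAULT_OOM;
	goto out_unlock;

sigbus:
	data->type = VM_FAULT_SIGBUS;
out_unlock:
	mutex_unlock(&bo->mutex);
	return NULL;
}

As the comments added in the nopfn rework note, the pfn is inserted while bo->mutex is held so the fault path cannot race with unmap_mapping_range(), and vma->vm_page_prot is only rewritten when no ptes are present for the vma.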