| author | Nian Wu <nian@graphics.(none)> | 2007-02-25 17:06:13 -0800 |
|---|---|---|
| committer | Nian Wu <nian@graphics.(none)> | 2007-02-25 17:06:13 -0800 |
| commit | df2fc3ec62d6799a3266cfb18b1279a126892f44 (patch) | |
| tree | 94d39b9172bce8c93cb035e40d12e635dfaeb12f /linux-core/drm_bo.c | |
| parent | 80095ffe01efe79983c2124ecc99ce979d7ac6a9 (diff) | |
| parent | 9d8ba2d0d479f53b996c0a0e366acfee52daab3b (diff) | |
Merge git://proxy01.pd.intel.com:9419/git/mesa/drm into crestline
Diffstat (limited to 'linux-core/drm_bo.c')
| -rw-r--r-- | linux-core/drm_bo.c | 1542 |
1 file changed, 947 insertions, 595 deletions
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 2b960c75..548ce14f 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -10,6 +10,10 @@
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -18,11 +22,6 @@
  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
  *
  **************************************************************************/
 /*
@@ -32,34 +31,28 @@
 #include "drmP.h"
 
 /*
- * Buffer object locking policy:
- * Lock dev->struct_mutex;
- * Increase usage
- * Unlock dev->struct_mutex;
- * Lock buffer->mutex;
- * Do whatever you want;
- * Unlock buffer->mutex;
- * Decrease usage. Call destruction if zero.
+ * Locking may look a bit complicated but isn't really:
+ *
+ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
+ * when there is a chance that it can be zero before or after the operation.
+ *
+ * dev->struct_mutex also protects all lists and list heads. Hash tables and hash
+ * heads.
 *
- * User object visibility ups usage just once, since it has its own
- * refcounting.
+ * bo->mutex protects the buffer object itself excluding the usage field.
+ * bo->mutex does also protect the buffer list heads, so to manipulate those, we need
+ * both the bo->mutex and the dev->struct_mutex.
 *
- * Destruction:
- * lock dev->struct_mutex;
- * Verify that usage is zero. Otherwise unlock and continue.
- * Destroy object.
- * unlock dev->struct_mutex;
+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
+ * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
+ * traversal will, in general, need to be restarted.
 *
- * Mutex and spinlock locking orders:
- * 1.) Buffer mutex
- * 2.) Refer to ttm locking orders.
 */
 
-static void drm_bo_destroy_locked(drm_buffer_object_t *bo);
-
-#define DRM_FLAG_MASKED(_old, _new, _mask) {\
-(_old) ^= (((_old) ^ (_new)) & (_mask)); \
-}
+static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
+static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
+static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
+static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
 
 static inline uint32_t drm_bo_type_flags(unsigned type)
 {
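The rewritten locking comment above boils down to one ordering rule: bo->mutex ranks above dev->struct_mutex, and any struct_mutex-protected list walk must be restarted after the lock is dropped. A minimal user-space sketch of that restart pattern, with pthread mutexes and a hand-rolled list standing in for the kernel types (not DRM code):

```c
#include <pthread.h>
#include <stddef.h>

struct fake_bo {
	pthread_mutex_t mutex;	/* protects the object body            */
	int usage;		/* refcount, guarded by the dev mutex  */
	struct fake_bo *next;	/* list link, guarded by the dev mutex */
};

static pthread_mutex_t dev_struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct fake_bo *lru_head;

static void drain_lru(void)
{
restart:
	pthread_mutex_lock(&dev_struct_mutex);
	while (lru_head) {
		struct fake_bo *bo = lru_head;

		bo->usage++;		/* pin while the dev mutex drops */
		pthread_mutex_unlock(&dev_struct_mutex);

		/* bo->mutex ranks above dev->struct_mutex, so it must be
		 * taken while the dev mutex is NOT held. */
		pthread_mutex_lock(&bo->mutex);
		/* ... operate on the buffer object ... */
		pthread_mutex_unlock(&bo->mutex);

		pthread_mutex_lock(&dev_struct_mutex);
		lru_head = bo->next;	/* this pass consumes the entry  */
		bo->usage--;
		pthread_mutex_unlock(&dev_struct_mutex);

		/* The list may have changed while unlocked: restart. */
		goto restart;
	}
	pthread_mutex_unlock(&dev_struct_mutex);
}

int main(void)
{
	drain_lru();
	return 0;
}
```

The restart terminates here because each pass removes the entry it worked on; the kernel code relies on the same property.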
@@ -70,76 +63,189 @@ static inline uint32_t drm_bo_type_flags(unsigned type)
  * bo locked. dev->struct_mutex locked.
  */
 
-static void drm_bo_add_to_lru(drm_buffer_object_t * bo,
-			      drm_buffer_manager_t * bm)
+void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
 {
-	struct list_head *list;
-	bo->mem_type = 0;
+	drm_mem_type_manager_t *man;
 
-	switch(bo->flags & DRM_BO_MASK_MEM) {
-	case DRM_BO_FLAG_MEM_TT:
-		bo->mem_type = DRM_BO_MEM_TT;
-		break;
-	case DRM_BO_FLAG_MEM_VRAM:
-		bo->mem_type = DRM_BO_MEM_VRAM;
-		break;
-	case DRM_BO_FLAG_MEM_LOCAL:
-		bo->mem_type = DRM_BO_MEM_LOCAL;
-		break;
-	default:
-		BUG_ON(1);
+	man = &bo->dev->bm.man[bo->pinned_mem_type];
+	list_add_tail(&bo->pinned_lru, &man->pinned);
+}
+
+void drm_bo_add_to_lru(drm_buffer_object_t * bo)
+{
+	drm_mem_type_manager_t *man;
+
+	if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
+		man = &bo->dev->bm.man[bo->mem.mem_type];
+		list_add_tail(&bo->lru, &man->lru);
+	} else {
+		INIT_LIST_HEAD(&bo->lru);
 	}
-	list = (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
-		&bm->pinned[bo->mem_type] : &bm->lru[bo->mem_type];
-	list_add_tail(&bo->lru, list);
-	return;
+}
+
+static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
+{
+#ifdef DRM_ODD_MM_COMPAT
+	int ret;
+
+	ret = drm_bo_lock_kmm(bo);
+	if (ret)
+		return ret;
+	drm_bo_unmap_virtual(bo);
+	if (old_is_pci)
+		drm_bo_finish_unmap(bo);
+#else
+	drm_bo_unmap_virtual(bo);
+#endif
+	return 0;
+}
+
+static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
+{
+#ifdef DRM_ODD_MM_COMPAT
+	int ret;
+
+	ret = drm_bo_remap_bound(bo);
+	if (ret) {
+		DRM_ERROR("Failed to remap a bound buffer object.\n"
+			  "\tThis might cause a sigbus later.\n");
+	}
+	drm_bo_unlock_kmm(bo);
+#endif
 }
 
 /*
- * bo locked.
+ * Call bo->mutex locked.
 */
 
-static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict,
-				int force_no_move)
+static int drm_bo_add_ttm(drm_buffer_object_t * bo)
 {
 	drm_device_t *dev = bo->dev;
-	int ret;
+	int ret = 0;
+	bo->ttm = NULL;
 
-	if (bo->mm_node) {
-		mutex_lock(&dev->struct_mutex);
-		if (evict)
-			ret = drm_evict_ttm(bo->ttm);
-		else
-			ret = drm_unbind_ttm(bo->ttm);
+	switch (bo->type) {
+	case drm_bo_type_dc:
+		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
+		if (!bo->ttm)
+			ret = -ENOMEM;
+		break;
+	case drm_bo_type_user:
+	case drm_bo_type_fake:
+		break;
+	default:
+		DRM_ERROR("Illegal buffer object type\n");
+		ret = -EINVAL;
+		break;
+	}
 
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			if (ret == -EAGAIN)
-				schedule();
-			return ret;
-		}
+	return ret;
+}
+
+static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
+				  drm_bo_mem_reg_t * mem,
+				  int evict, int no_wait)
+{
+	drm_device_t *dev = bo->dev;
+	drm_buffer_manager_t *bm = &dev->bm;
+	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
+	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
+	drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
+	drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
+	int ret = 0;
 
-		if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
-			drm_mm_put_block(bo->mm_node);
-			bo->mm_node = NULL;
+	if (old_is_pci || new_is_pci)
+		ret = drm_bo_vm_pre_move(bo, old_is_pci);
+	if (ret)
+		return ret;
+
+	/*
+	 * Create and bind a ttm if required.
+	 */
+
+	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
+		ret = drm_bo_add_ttm(bo);
+		if (ret)
+			goto out_err;
+
+		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
+			ret = drm_bind_ttm(bo->ttm, new_man->flags &
+					   DRM_BO_FLAG_CACHED,
+					   mem->mm_node->start);
+			if (ret)
+				goto out_err;
+		}
-		mutex_unlock(&dev->struct_mutex);
 	}
 
-	bo->flags &= ~DRM_BO_FLAG_MEM_TT;
-	bo->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+	if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
+
+		drm_bo_mem_reg_t *old_mem = &bo->mem;
+		uint32_t save_flags = old_mem->flags;
+		uint32_t save_mask = old_mem->mask;
+
+		*old_mem = *mem;
+		mem->mm_node = NULL;
+		old_mem->mask = save_mask;
+		DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
+
+	} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
+		   !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
+
+		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
+
+	} else if (dev->driver->bo_driver->move) {
+		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
+
+	} else {
+
+		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
+
+	}
+
+	if (ret)
+		goto out_err;
+
+	if (old_is_pci || new_is_pci)
+		drm_bo_vm_post_move(bo);
+
+	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
+		ret =
+		    dev->driver->bo_driver->invalidate_caches(dev,
+							      bo->mem.flags);
+		if (ret)
+			DRM_ERROR("Can not flush read caches\n");
+	}
+
+	DRM_FLAG_MASKED(bo->priv_flags,
+			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
+			_DRM_BO_FLAG_EVICTED);
+
+	if (bo->mem.mm_node)
+		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
 
 	return 0;
-}
 
+      out_err:
+	if (old_is_pci || new_is_pci)
+		drm_bo_vm_post_move(bo);
+
+	new_man = &bm->man[bo->mem.mem_type];
+	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
+		drm_ttm_unbind(bo->ttm);
+		drm_destroy_ttm(bo->ttm);
+		bo->ttm = NULL;
+	}
+
+	return ret;
+}
 
 /*
  * Call bo->mutex locked.
  * Wait until the buffer is idle.
 */
 
-static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
-		       int no_wait)
+int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
+		int no_wait)
 {
 	drm_fence_object_t *fence = bo->fence;
@@ -168,14 +274,44 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
 	return 0;
 }
 
+static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
+{
+	drm_device_t *dev = bo->dev;
+	drm_buffer_manager_t *bm = &dev->bm;
+
+	if (bo->fence) {
+		if (bm->nice_mode) {
+			unsigned long _end = jiffies + 3 * DRM_HZ;
+			int ret;
+			do {
+				ret = drm_bo_wait(bo, 0, 1, 0);
+				if (ret && allow_errors)
+					return ret;
+
+			} while (ret && !time_after_eq(jiffies, _end));
+
+			if (bo->fence) {
+				bm->nice_mode = 0;
+				DRM_ERROR("Detected GPU lockup or "
+					  "fence driver was taken down. "
+					  "Evicting buffer.\n");
+			}
+		}
+		if (bo->fence) {
+			drm_fence_usage_deref_unlocked(dev, bo->fence);
+			bo->fence = NULL;
+		}
+	}
+	return 0;
+}
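drm_bo_expire_fence above encodes a simple watchdog policy: poll the fence for up to three seconds (jiffies + 3 * DRM_HZ), and if it still has not signaled, assume a GPU lockup, disable nice mode, and drop the fence anyway. A compressed, self-contained sketch of that bounded-retry shape, with time(2) in place of jiffies and a stub in place of the real fence check (it busy-waits; the kernel sleeps between polls):

```c
#include <stdio.h>
#include <time.h>

/* Hypothetical fence-query stub standing in for the real fence check. */
static int fence_signaled(void)
{
	return 0;	/* pretend the GPU never signals, to show the timeout */
}

static int expire_fence_bounded(void)
{
	time_t end = time(NULL) + 3;	/* mirrors jiffies + 3 * DRM_HZ */

	do {
		if (fence_signaled())
			return 0;
	} while (time(NULL) < end);

	fprintf(stderr, "Detected GPU lockup or fence driver was taken down. "
			"Evicting buffer.\n");
	return -1;	/* caller drops the fence reference regardless */
}

int main(void)
{
	return expire_fence_bounded() ? 1 : 0;
}
```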
 
 /*
  * Call dev->struct_mutex locked.
  * Attempts to remove all private references to a buffer by expiring its
  * fence object and removing from lru lists and memory managers.
 */
-
-static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
+static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
 {
 	drm_device_t *dev = bo->dev;
 	drm_buffer_manager_t *bm = &dev->bm;
@@ -186,32 +322,14 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
 
 	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
 
-	if (bo->fence && drm_fence_object_signaled(bo->fence,
-						   bo->fence_type)) {
+	if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
 		drm_fence_usage_deref_locked(dev, bo->fence);
 		bo->fence = NULL;
 	}
 
-	if (bo->fence && remove_all) {
-		if (bm->nice_mode) {
-			unsigned long _end = jiffies + 3 * DRM_HZ;
-			int ret;
-			do {
-				ret = drm_bo_wait(bo, 0, 1, 0);
-			} while (ret && !time_after_eq(jiffies, _end));
+	if (bo->fence && remove_all)
+		(void)drm_bo_expire_fence(bo, 0);
 
-			if (bo->fence) {
-				bm->nice_mode = 0;
-				DRM_ERROR("Detected GPU lockup or "
-					  "fence driver was taken down. "
-					  "Evicting waiting buffers.\n");
-			}
-			if (bo->fence) {
-				drm_fence_usage_deref_unlocked(dev, bo->fence);
-				bo->fence = NULL;
-			}
-		}
-	}
 	mutex_lock(&dev->struct_mutex);
 
 	if (!atomic_dec_and_test(&bo->usage)) {
@@ -220,9 +338,16 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
 
 	if (!bo->fence) {
 		list_del_init(&bo->lru);
-		if (bo->mm_node) {
-			drm_mm_put_block(bo->mm_node);
-			bo->mm_node = NULL;
+		if (bo->mem.mm_node) {
+			drm_mm_put_block(bo->mem.mm_node);
+			if (bo->pinned_node == bo->mem.mm_node)
+				bo->pinned_node = NULL;
+			bo->mem.mm_node = NULL;
+		}
+		list_del_init(&bo->pinned_lru);
+		if (bo->pinned_node) {
+			drm_mm_put_block(bo->pinned_node);
+			bo->pinned_node = NULL;
 		}
 		list_del_init(&bo->ddestroy);
 		mutex_unlock(&bo->mutex);
@@ -234,50 +359,42 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
 		drm_fence_object_flush(dev, bo->fence, bo->fence_type);
 		list_add_tail(&bo->ddestroy, &bm->ddestroy);
 		schedule_delayed_work(&bm->wq,
-				      ((DRM_HZ / 100) <
-				       1) ? 1 : DRM_HZ / 100);
+				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
 	}
 
-out:
+      out:
 	mutex_unlock(&bo->mutex);
 	return;
 }
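When the buffer still carries an unsignaled fence, drm_bo_cleanup_refs does not block: it flushes the fence, parks the object on the bm->ddestroy list, and re-arms a delayed workqueue to retry roughly every 10 ms (the ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100 clamp). A minimal sketch of that deferred-reaping idea, with a plain polling loop and toy types standing in for the kernel workqueue:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: a "fence" is just a countdown until the GPU is done. */
struct toy_bo {
	int fence_countdown;	/* >0 means still busy on the GPU     */
	bool on_ddestroy;	/* parked on the delayed-destroy list */
};

/* One pass of the delayed-destroy worker: reap what has gone idle. */
static void delayed_destroy_pass(struct toy_bo *list, int n)
{
	for (int i = 0; i < n; ++i) {
		struct toy_bo *bo = &list[i];

		if (!bo->on_ddestroy)
			continue;
		if (bo->fence_countdown > 0) {
			bo->fence_countdown--;	/* still fenced: retry later */
			continue;
		}
		bo->on_ddestroy = false;	/* idle: destroy now */
		printf("destroyed bo %d\n", i);
	}
}

int main(void)
{
	struct toy_bo bos[2] = { { 0, true }, { 3, true } };

	/* The kernel re-schedules the work every ~10 ms; we just loop. */
	for (int pass = 0; pass < 5; ++pass)
		delayed_destroy_pass(bos, 2);
	return 0;
}
```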
-
 /*
  * Verify that refcount is 0 and that there are no internal references
  * to the buffer object. Then destroy it.
 */
 
-static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
+static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
 {
 	drm_device_t *dev = bo->dev;
 	drm_buffer_manager_t *bm = &dev->bm;
 
-	if (list_empty(&bo->lru) && bo->mm_node == NULL && atomic_read(&bo->usage) == 0) {
-		BUG_ON(bo->fence != NULL);
-
-		if (bo->ttm) {
-			unsigned long _end = jiffies + DRM_HZ;
-			int ret;
-
-			do {
-				ret = drm_unbind_ttm(bo->ttm);
-				if (ret == -EAGAIN) {
-					mutex_unlock(&dev->struct_mutex);
-					schedule();
-					mutex_lock(&dev->struct_mutex);
-				}
-			} while (ret == -EAGAIN && !time_after_eq(jiffies, _end));
-
-			if (ret) {
-				DRM_ERROR("Couldn't unbind TTM region while destroying a buffer. "
-					  "Bad. Continuing anyway\n");
-			}
+	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
+	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
+	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
+		if (bo->fence != NULL) {
+			DRM_ERROR("Fence was non-zero.\n");
+			drm_bo_cleanup_refs(bo, 0);
+			return;
 		}
 
-		if (bo->ttm_object) {
-			drm_ttm_object_deref_locked(dev, bo->ttm_object);
+#ifdef DRM_ODD_MM_COMPAT
+		BUG_ON(!list_empty(&bo->vma_list));
+		BUG_ON(!list_empty(&bo->p_mm_list));
+#endif
+
+		if (bo->ttm) {
+			drm_ttm_unbind(bo->ttm);
+			drm_destroy_ttm(bo->ttm);
+			bo->ttm = NULL;
 		}
 
 		atomic_dec(&bm->count);
@@ -297,7 +414,6 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
 	return;
 }
 
-
 /*
  * Call dev->struct_mutex locked.
 */
@@ -325,7 +441,6 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
 			atomic_dec(&nentry->usage);
 		}
 	}
-
 }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
@@ -338,11 +453,11 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
 	drm_device_t *dev = (drm_device_t *) data;
 	drm_buffer_manager_t *bm = &dev->bm;
 #else
-	drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
+	drm_buffer_manager_t *bm =
+	    container_of(work, drm_buffer_manager_t, wq.work);
 	drm_device_t *dev = container_of(bm, drm_device_t, bm);
 #endif
 
-
 	DRM_DEBUG("Delayed delete Worker\n");
 
 	mutex_lock(&dev->struct_mutex);
@@ -358,7 +473,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
+void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
 {
 	if (atomic_dec_and_test(&bo->usage)) {
 		drm_bo_destroy_locked(bo);
@@ -367,8 +482,11 @@ static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
 
 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
 {
-	drm_bo_usage_deref_locked(drm_user_object_entry(uo, drm_buffer_object_t,
-							base));
+	drm_buffer_object_t *bo =
+	    drm_user_object_entry(uo, drm_buffer_object_t, base);
+
+	drm_bo_takedown_vm_locked(bo);
+	drm_bo_usage_deref_locked(bo);
 }
 
 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
@@ -443,7 +561,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
 		}
 	} else {
 		mutex_unlock(&dev->struct_mutex);
-		ret = drm_fence_object_create(dev, fence_type,
+		ret = drm_fence_object_create(dev, 0, fence_type,
 					      fence_flags | DRM_FENCE_FLAG_EMIT,
 					      &fence);
 		mutex_lock(&dev->struct_mutex);
@@ -454,6 +572,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
 	count = 0;
 	l = f_list.next;
 	while (l != &f_list) {
+		prefetch(l->next);
 		entry = list_entry(l, drm_buffer_object_t, lru);
 		atomic_inc(&entry->usage);
 		mutex_unlock(&dev->struct_mutex);
@@ -468,7 +587,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
 			DRM_FLAG_MASKED(entry->priv_flags, 0,
 					_DRM_BO_FLAG_UNFENCED);
 			DRM_WAKEUP(&entry->event_queue);
-			drm_bo_add_to_lru(entry, bm);
+			drm_bo_add_to_lru(entry);
 		}
 		mutex_unlock(&entry->mutex);
 		drm_bo_usage_deref_locked(entry);
@@ -489,11 +608,11 @@ EXPORT_SYMBOL(drm_fence_buffer_objects);
 */
 
 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
-			int no_wait, int force_no_move)
+			int no_wait)
 {
 	int ret = 0;
 	drm_device_t *dev = bo->dev;
-	drm_buffer_manager_t *bm = &dev->bm;
+	drm_bo_mem_reg_t evict_mem;
 
 	/*
	 * Someone might have modified the buffer before we took the buffer mutex.
	 */
@@ -501,70 +620,93 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
 	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
 		goto out;
 
-	if (!(bo->flags & drm_bo_type_flags(mem_type)))
+	if (bo->mem.mem_type != mem_type)
 		goto out;
 
 	ret = drm_bo_wait(bo, 0, 0, no_wait);
 
+	if (ret && ret != -EAGAIN) {
+		DRM_ERROR("Failed to expire fence before "
+			  "buffer eviction.\n");
+		goto out;
+	}
+
+	evict_mem = bo->mem;
+	evict_mem.mm_node = NULL;
+
+	if (bo->type == drm_bo_type_fake) {
+		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
+		bo->mem.mm_node = NULL;
+		goto out1;
+	}
+
+	evict_mem = bo->mem;
+	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
+	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
+
 	if (ret) {
 		if (ret != -EAGAIN)
-			DRM_ERROR("Failed to expire fence before "
-				  "buffer eviction.\n");
+			DRM_ERROR("Failed to find memory space for "
+				  "buffer 0x%p eviction.\n", bo);
 		goto out;
 	}
 
-	if (mem_type == DRM_BO_MEM_TT) {
-		ret = drm_move_tt_to_local(bo, 1, force_no_move);
-		if (ret)
-			goto out;
-		mutex_lock(&dev->struct_mutex);
-		list_del_init(&bo->lru);
-		drm_bo_add_to_lru(bo, bm);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
 
-	if (ret)
+	if (ret) {
+		if (ret != -EAGAIN)
+			DRM_ERROR("Buffer eviction failed\n");
 		goto out;
+	}
+
+      out1:
+	mutex_lock(&dev->struct_mutex);
+	if (evict_mem.mm_node) {
+		if (evict_mem.mm_node != bo->pinned_node)
+			drm_mm_put_block(evict_mem.mm_node);
+		evict_mem.mm_node = NULL;
+	}
+	list_del(&bo->lru);
+	drm_bo_add_to_lru(bo);
+	mutex_unlock(&dev->struct_mutex);
 
 	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
 			_DRM_BO_FLAG_EVICTED);
+
       out:
 	return ret;
 }
 
-/*
- * bo->mutex locked.
- */
-
-int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type,
-		       int no_wait)
+static int drm_bo_mem_force_space(drm_device_t * dev,
+				  drm_bo_mem_reg_t * mem,
+				  uint32_t mem_type, int no_wait)
 {
-	drm_device_t *dev = bo->dev;
 	drm_mm_node_t *node;
 	drm_buffer_manager_t *bm = &dev->bm;
 	drm_buffer_object_t *entry;
-	drm_mm_t *mm = &bm->manager[mem_type];
+	drm_mem_type_manager_t *man = &bm->man[mem_type];
 	struct list_head *lru;
-	unsigned long size = bo->num_pages;
+	unsigned long num_pages = mem->num_pages;
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
 	do {
-		node = drm_mm_search_free(mm, size, bo->page_alignment, 1);
+		node = drm_mm_search_free(&man->manager, num_pages,
+					  mem->page_alignment, 1);
 		if (node)
 			break;
 
-		lru = &bm->lru[mem_type];
+		lru = &man->lru;
 		if (lru->next == lru)
 			break;
 
 		entry = list_entry(lru->next, drm_buffer_object_t, lru);
-
 		atomic_inc(&entry->usage);
 		mutex_unlock(&dev->struct_mutex);
 		mutex_lock(&entry->mutex);
-		BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
-		ret = drm_bo_evict(entry, mem_type, no_wait, 0);
+		BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
+
+		ret = drm_bo_evict(entry, mem_type, no_wait);
 		mutex_unlock(&entry->mutex);
 		drm_bo_usage_deref_unlocked(entry);
 		if (ret)
@@ -573,146 +715,163 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type,
 	} while (1);
 
 	if (!node) {
-		DRM_ERROR("Out of videoram / aperture space\n");
 		mutex_unlock(&dev->struct_mutex);
 		return -ENOMEM;
 	}
 
-	node = drm_mm_get_block(node, size, bo->page_alignment);
+	node = drm_mm_get_block(node, num_pages, mem->page_alignment);
 	mutex_unlock(&dev->struct_mutex);
-	BUG_ON(!node);
-	node->private = (void *)bo;
-
-	bo->mm_node = node;
-	bo->offset = node->start * PAGE_SIZE;
+	mem->mm_node = node;
+	mem->mem_type = mem_type;
 	return 0;
 }
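drm_bo_mem_force_space above is the memory-pressure path: look for a free block, and if none exists, evict the least-recently-used buffer of that memory type and try again, until either the search succeeds or the LRU runs dry. A toy user-space rendering of that search-evict-retry loop, with a flat page array standing in for the drm_mm allocator:

```c
#include <stdio.h>

#define POOL_PAGES 8

static int pool_used[POOL_PAGES];	/* 0 free, 1 held by an LRU buffer   */
static int lru_victims = POOL_PAGES;	/* how many evictable buffers remain */

static int search_free(int num_pages)
{
	int run = 0;

	for (int i = 0; i < POOL_PAGES; ++i) {
		run = pool_used[i] ? 0 : run + 1;
		if (run == num_pages)
			return i - num_pages + 1;	/* start of the block */
	}
	return -1;
}

static void evict_lru_head(void)
{
	/* Free one page, mimicking eviction of the LRU buffer. */
	for (int i = 0; i < POOL_PAGES; ++i)
		if (pool_used[i]) { pool_used[i] = 0; break; }
	lru_victims--;
}

/* Mirrors drm_bo_mem_force_space: search, evict, retry. */
static int force_space(int num_pages)
{
	int start;

	while ((start = search_free(num_pages)) < 0) {
		if (lru_victims == 0)
			return -1;	/* LRU empty: -ENOMEM in the kernel */
		evict_lru_head();
	}
	return start;
}

int main(void)
{
	for (int i = 0; i < POOL_PAGES; ++i)
		pool_used[i] = 1;	/* start fully occupied */
	printf("got block at page %d\n", force_space(3));
	return 0;
}
```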
 
-static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
+static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
+				uint32_t mem_type,
+				uint32_t mask, uint32_t * res_mask)
 {
-	drm_device_t *dev = bo->dev;
-	drm_ttm_backend_t *be;
-	int ret;
+	uint32_t cur_flags = drm_bo_type_flags(mem_type);
+	uint32_t flag_diff;
 
-	if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
-		BUG_ON(bo->mm_node);
-		ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
-		if (ret)
-			return ret;
-	}
+	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
+		cur_flags |= DRM_BO_FLAG_CACHED;
+	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
+		cur_flags |= DRM_BO_FLAG_MAPPABLE;
+	if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
+		DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
 
-	DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start);
-
-	mutex_lock(&dev->struct_mutex);
-	ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
-			   bo->mm_node->start);
-	if (ret) {
-		drm_mm_put_block(bo->mm_node);
-		bo->mm_node = NULL;
-	}
-	mutex_unlock(&dev->struct_mutex);
+	if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
+		return 0;
 
-	if (ret) {
-		return ret;
+	if (mem_type == DRM_BO_MEM_LOCAL) {
+		*res_mask = cur_flags;
+		return 1;
 	}
 
-	be = bo->ttm->be;
-	if (be->needs_ub_cache_adjust(be))
-		bo->flags &= ~DRM_BO_FLAG_CACHED;
-	bo->flags &= ~DRM_BO_MASK_MEM;
-	bo->flags |= DRM_BO_FLAG_MEM_TT;
+	flag_diff = (mask ^ cur_flags);
+	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
+	    (!(mask & DRM_BO_FLAG_CACHED) ||
+	     (mask & DRM_BO_FLAG_FORCE_CACHING)))
+		return 0;
 
-	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
-		ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
-		if (ret)
-			DRM_ERROR("Could not flush read caches\n");
-	}
-	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
+	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
+	    ((mask & DRM_BO_FLAG_MAPPABLE) ||
+	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
+		return 0;
 
-	return 0;
+	*res_mask = cur_flags;
+	return 1;
 }
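drm_bo_mt_compatible reduces placement to bit arithmetic: build the flags a memory type can offer (cur_flags), reject it if it shares no memory-type bit with the request mask, then reject mismatches on CACHED or MAPPABLE that the caller either required or force-requested. The same shape in a self-contained snippet, with illustrative flag values (the real DRM_BO_FLAG_* encodings differ):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only; the real DRM_BO_FLAG_* encodings differ. */
#define F_MEM_LOCAL	(1u << 0)
#define F_MEM_TT	(1u << 1)
#define F_MASK_MEM	(F_MEM_LOCAL | F_MEM_TT)
#define F_CACHED	(1u << 8)
#define F_MAPPABLE	(1u << 9)
#define F_FORCE_CACHING	(1u << 10)

static int mt_compatible(uint32_t cur_flags, uint32_t mask,
			 uint32_t *res_mask)
{
	uint32_t flag_diff;

	if ((cur_flags & mask & F_MASK_MEM) == 0)
		return 0;	/* wrong memory type altogether */

	flag_diff = mask ^ cur_flags;
	if ((flag_diff & F_CACHED) &&
	    (!(mask & F_CACHED) || (mask & F_FORCE_CACHING)))
		return 0;	/* caching requirement can't be met */

	if ((flag_diff & F_MAPPABLE) && (mask & F_MAPPABLE))
		return 0;	/* caller insists on mappable; type isn't */

	*res_mask = cur_flags;
	return 1;
}

int main(void)
{
	uint32_t res;

	/* A TT type offering mappable, uncached memory vs. a TT request. */
	printf("%d\n", mt_compatible(F_MEM_TT | F_MAPPABLE,
				     F_MEM_TT | F_MAPPABLE, &res));
	return 0;
}
```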
 
-static int drm_bo_new_flags(drm_device_t * dev,
-			    uint32_t flags, uint32_t new_mask, uint32_t hint,
-			    int init, uint32_t * n_flags, uint32_t * n_mask)
+int drm_bo_mem_space(drm_buffer_object_t * bo,
+		     drm_bo_mem_reg_t * mem, int no_wait)
 {
-	uint32_t new_flags = 0;
-	uint32_t new_props;
-	drm_bo_driver_t *driver = dev->driver->bo_driver;
+	drm_device_t *dev = bo->dev;
 	drm_buffer_manager_t *bm = &dev->bm;
-	unsigned i;
+	drm_mem_type_manager_t *man;
+
+	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
+	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
+	uint32_t i;
+	uint32_t mem_type = DRM_BO_MEM_LOCAL;
+	uint32_t cur_flags;
+	int type_found = 0;
+	int type_ok = 0;
+	int has_eagain = 0;
+	drm_mm_node_t *node = NULL;
+	int ret;
 
-	/*
-	 * First adjust the mask to take away nonexistant memory types.
-	 */
+	mem->mm_node = NULL;
+	for (i = 0; i < num_prios; ++i) {
+		mem_type = prios[i];
+		man = &bm->man[mem_type];
 
-	for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
-		if (!bm->use_type[i])
-			new_mask &= ~drm_bo_type_flags(i);
-	}
+		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);
 
-	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
-		DRM_ERROR
-		    ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
-		     "processes\n");
-		return -EPERM;
-	}
-	if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
-		if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
-		     !driver->cached[DRM_BO_MEM_TT]) &&
-		    ((new_mask & DRM_BO_FLAG_MEM_VRAM)
-		     && !driver->cached[DRM_BO_MEM_VRAM])) {
-			new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
-		} else {
-			if (!driver->cached[DRM_BO_MEM_TT])
-				new_flags &= DRM_BO_FLAG_MEM_TT;
-			if (!driver->cached[DRM_BO_MEM_VRAM])
-				new_flags &= DRM_BO_FLAG_MEM_VRAM;
+		if (!type_ok)
+			continue;
+
+		if (mem_type == DRM_BO_MEM_LOCAL)
+			break;
+
+		if ((mem_type == bo->pinned_mem_type) &&
+		    (bo->pinned_node != NULL)) {
+			node = bo->pinned_node;
+			break;
 		}
-	}
 
-	if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
-	    !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
-		if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
-		    !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
-			DRM_ERROR
-			    ("Cannot read cached from a pinned VRAM / TT buffer\n");
-			return -EINVAL;
+		mutex_lock(&dev->struct_mutex);
+		if (man->has_type && man->use_type) {
+			type_found = 1;
+			node = drm_mm_search_free(&man->manager, mem->num_pages,
+						  mem->page_alignment, 1);
+			if (node)
+				node = drm_mm_get_block(node, mem->num_pages,
+							mem->page_alignment);
 		}
+		mutex_unlock(&dev->struct_mutex);
+		if (node)
+			break;
 	}
 
-	/*
-	 * Determine new memory location:
-	 */
+	if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
+		mem->mm_node = node;
+		mem->mem_type = mem_type;
+		mem->flags = cur_flags;
+		return 0;
+	}
 
-	if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {
+	if (!type_found)
+		return -EINVAL;
 
-		new_flags = new_mask & DRM_BO_MASK_MEM;
+	num_prios = dev->driver->bo_driver->num_mem_busy_prio;
+	prios = dev->driver->bo_driver->mem_busy_prio;
 
-		if (!new_flags) {
-			DRM_ERROR("Invalid buffer object memory flags\n");
-			return -EINVAL;
-		}
+	for (i = 0; i < num_prios; ++i) {
+		mem_type = prios[i];
+		man = &bm->man[mem_type];
 
-		if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
-			if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
-			    new_flags & (DRM_BO_FLAG_MEM_VRAM |
-					 DRM_BO_FLAG_MEM_TT)) {
-				new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
-			} else {
-				new_flags = DRM_BO_FLAG_MEM_LOCAL;
-			}
-		}
-		if (new_flags & DRM_BO_FLAG_MEM_TT) {
-			if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
-			    new_flags & DRM_BO_FLAG_MEM_VRAM) {
-				new_flags = DRM_BO_FLAG_MEM_VRAM;
-			} else {
-				new_flags = DRM_BO_FLAG_MEM_TT;
-			}
+		if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
+			continue;
+
+		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
+
+		if (ret == 0) {
+			mem->flags = cur_flags;
+			return 0;
 		}
-	} else {
-		new_flags = flags & DRM_BO_MASK_MEM;
+
+		if (ret == -EAGAIN)
+			has_eagain = 1;
 	}
+
+	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_bo_mem_space);
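drm_bo_mem_space makes placement a two-pass walk over driver-supplied priority lists: first mem_type_prio, taking the first type that is compatible and has free space, then mem_busy_prio, where eviction via drm_bo_mem_force_space is allowed; an -EAGAIN from any busy type wins over the final -ENOMEM. A condensed sketch of that policy with stubbed-out allocation callbacks (the stubs and type names are invented for illustration):

```c
#include <errno.h>
#include <stdio.h>

enum { MEM_LOCAL, MEM_TT, MEM_VRAM };

/* Stubbed policy inputs; a real driver fills these per chip. */
static const int type_prio[] = { MEM_VRAM, MEM_TT, MEM_LOCAL };
static const int busy_prio[] = { MEM_TT, MEM_LOCAL };

static int compatible(int t)  { return t != MEM_VRAM; }  /* pretend no VRAM */
static int try_alloc(int t)   { return t == MEM_TT ? -ENOMEM : 0; }
static int force_space(int t) { return t == MEM_TT ? -EAGAIN : 0; }

static int mem_space(int *out_type)
{
	int i, ret, has_eagain = 0;

	/* Pass 1: free space only, in preferred order. */
	for (i = 0; i < (int)(sizeof type_prio / sizeof *type_prio); ++i) {
		if (!compatible(type_prio[i]))
			continue;
		if (try_alloc(type_prio[i]) == 0) {
			*out_type = type_prio[i];
			return 0;
		}
	}

	/* Pass 2: allow eviction, in "busy" preference order. */
	for (i = 0; i < (int)(sizeof busy_prio / sizeof *busy_prio); ++i) {
		if (!compatible(busy_prio[i]))
			continue;
		ret = force_space(busy_prio[i]);
		if (ret == 0) {
			*out_type = busy_prio[i];
			return 0;
		}
		if (ret == -EAGAIN)
			has_eagain = 1;
	}
	return has_eagain ? -EAGAIN : -ENOMEM;
}

int main(void)
{
	int t;
	printf("mem_space -> %d (type %d)\n", mem_space(&t), t);
	return 0;
}
```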
+
+static int drm_bo_new_mask(drm_buffer_object_t * bo,
+			   uint32_t new_mask, uint32_t hint)
+{
+	uint32_t new_props;
+
+	if (bo->type == drm_bo_type_user) {
+		DRM_ERROR("User buffers are not supported yet\n");
+		return -EINVAL;
+	}
+	if (bo->type == drm_bo_type_fake &&
+	    !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
+		DRM_ERROR("Fake buffers must be pinned.\n");
+		return -EINVAL;
+	}
+
+	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+		DRM_ERROR
+		    ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
+		     "processes\n");
+		return -EPERM;
+	}
 
 	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
@@ -723,22 +882,7 @@ static int drm_bo_new_flags(drm_device_t * dev,
 		return -EINVAL;
 	}
 
-	new_flags |= new_mask & ~DRM_BO_MASK_MEM;
-
-	if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
-	    (new_flags & DRM_BO_FLAG_NO_EVICT) &&
-	    (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
-		if (!(flags & DRM_BO_FLAG_CACHED)) {
-			DRM_ERROR
-			    ("Cannot change caching policy of pinned buffer\n");
-			return -EINVAL;
-		} else {
-			new_flags &= ~DRM_BO_FLAG_CACHED;
-		}
-	}
-
-	*n_flags = new_flags;
-	*n_mask = new_mask;
+	bo->mem.mask = new_mask;
 	return 0;
 }
 
@@ -825,8 +969,8 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo)
 	int ret = 0;
 
 	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-	if (bo->mm_node)
-		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
+	if (bo->mem.mm_node)
+		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
 	return ret;
 }
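drm_bo_read_cached exists for one policy, applied below in drm_buffer_object_map: if the CPU is about to map a buffer for reading, the buffer was created READ_CACHED, but it currently sits in uncached memory, evict it to cacheable memory first. That predicate in isolation, with illustrative flag bits (not the real drm.h encodings):

```c
#include <stdio.h>

/* Illustrative flag bits; the real DRM_BO_FLAG_* encodings differ. */
#define MAP_READ	(1u << 0)
#define BO_READ_CACHED	(1u << 1)
#define BO_CACHED	(1u << 2)

/* Nonzero when a CPU read mapping should first evict the buffer to
 * cacheable memory, mirroring the check in drm_buffer_object_map(). */
static int needs_read_cached_evict(unsigned map_flags, unsigned bo_flags)
{
	return (map_flags & MAP_READ) &&
	       (bo_flags & BO_READ_CACHED) &&
	       !(bo_flags & BO_CACHED);
}

int main(void)
{
	printf("%d\n", needs_read_cached_evict(MAP_READ, BO_READ_CACHED));
	return 0;
}
```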
 
@@ -916,21 +1060,15 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
 				drm_bo_arg_reply_t * rep)
 {
 	rep->handle = bo->base.hash.key;
-	rep->flags = bo->flags;
-	rep->size = bo->num_pages * PAGE_SIZE;
+	rep->flags = bo->mem.flags;
+	rep->size = bo->mem.num_pages * PAGE_SIZE;
 	rep->offset = bo->offset;
-
-	if (bo->ttm_object) {
-		rep->arg_handle = bo->ttm_object->map_list.user_token;
-	} else {
-		rep->arg_handle = 0;
-	}
-
-	rep->mask = bo->mask;
+	rep->arg_handle = bo->map_list.user_token;
+	rep->mask = bo->mem.mask;
 	rep->buffer_start = bo->buffer_start;
 	rep->fence_flags = bo->fence_type;
 	rep->rep_flags = 0;
-	rep->page_alignment = bo->page_alignment;
+	rep->page_alignment = bo->mem.page_alignment;
 
 	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
 		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
@@ -988,14 +1126,14 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
 			}
 
 			if ((map_flags & DRM_BO_FLAG_READ) &&
-			    (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
-			    (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+			    (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
+			    (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
 				drm_bo_read_cached(bo);
 			}
 			break;
 		} else if ((map_flags & DRM_BO_FLAG_READ) &&
-			   (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
-			   (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+			   (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
+			   (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
 
 			/*
 			 * We are already mapped with different flags.
@@ -1078,46 +1216,124 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv,
 
 /*
  * bo->mutex locked.
+ * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
 */
 
-static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
-			      int no_wait, int force_no_move)
+int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
+		       int no_wait, int move_unfenced)
 {
+	drm_device_t *dev = bo->dev;
+	drm_buffer_manager_t *bm = &dev->bm;
 	int ret = 0;
-
+	drm_bo_mem_reg_t mem;
 	/*
	 * Flush outstanding fences.
	 */
 
+	drm_bo_busy(bo);
+
 	/*
-	 * Make sure we're not mapped.
+	 * Wait for outstanding fences.
	 */
 
-	ret = drm_bo_wait_unmapped(bo, no_wait);
+	ret = drm_bo_wait(bo, 0, 0, no_wait);
 	if (ret)
 		return ret;
 
+	mem.num_pages = bo->mem.num_pages;
+	mem.size = mem.num_pages << PAGE_SHIFT;
+	mem.mask = new_mem_flags;
+	mem.page_alignment = bo->mem.page_alignment;
+
+	mutex_lock(&bm->evict_mutex);
+	mutex_lock(&dev->struct_mutex);
+	list_del(&bo->lru);
+	list_add_tail(&bo->lru, &bm->unfenced);
+	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+			_DRM_BO_FLAG_UNFENCED);
+	mutex_unlock(&dev->struct_mutex);
+
 	/*
-	 * Wait for outstanding fences.
+	 * Determine where to move the buffer.
	 */
+	ret = drm_bo_mem_space(bo, &mem, no_wait);
+
+	if (ret)
+		goto out_unlock;
 
-	ret = drm_bo_wait(bo, 0, 0, no_wait);
+	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
 
-	if (ret == -EINTR)
-		return -EAGAIN;
-	if (ret)
-		return ret;
+      out_unlock:
+	if (ret || !move_unfenced) {
+		mutex_lock(&dev->struct_mutex);
+		if (mem.mm_node) {
+			if (mem.mm_node != bo->pinned_node)
+				drm_mm_put_block(mem.mm_node);
+			mem.mm_node = NULL;
+		}
+		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+		DRM_WAKEUP(&bo->event_queue);
+		list_del(&bo->lru);
+		drm_bo_add_to_lru(bo);
+		mutex_unlock(&dev->struct_mutex);
+	}
 
-	if (new_flags & DRM_BO_FLAG_MEM_TT) {
-		ret = drm_move_local_to_tt(bo, no_wait);
-		if (ret)
-			return ret;
-	} else {
-		drm_move_tt_to_local(bo, 0, force_no_move);
+	mutex_unlock(&bm->evict_mutex);
+	return ret;
+}
+
+static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
+{
+	uint32_t flag_diff = (mem->mask ^ mem->flags);
+
+	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
+		return 0;
+	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
+	    (!(mem->mask & DRM_BO_FLAG_CACHED) ||
+	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
+		return 0;
+	}
+	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
+	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
+	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+		return 0;
+	return 1;
+}
+
+static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_mem_type_manager_t *man;
+	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
+	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
+	uint32_t i;
+	int type_ok = 0;
+	uint32_t mem_type = 0;
+	uint32_t cur_flags;
+
+	if (drm_bo_mem_compat(mem))
+		return 0;
+
+	BUG_ON(mem->mm_node);
+
+	for (i = 0; i < num_prios; ++i) {
+		mem_type = prios[i];
+		man = &bm->man[mem_type];
+		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
+					       &cur_flags);
+		if (type_ok)
+			break;
 	}
 
-	return 0;
+	if (type_ok) {
+		mem->mm_node = NULL;
+		mem->mem_type = mem_type;
+		mem->flags = cur_flags;
+		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
+		return 0;
+	}
+
+	DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
+	return -EINVAL;
 }
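drm_bo_move_buffer stitches the earlier pieces into one pipeline: wait for fences, park the buffer on the unfenced list, ask drm_bo_mem_space for a destination, hand off to drm_bo_handle_move_mem, and unwind (free the node, rejoin the LRU) if anything fails or no fence will follow. A compile-clean sketch of that shape; every helper here is a stand-in stub, not a kernel function:

```c
#include <stdio.h>

/* Stand-in stubs; each mirrors a step named in the diff above. */
static int wait_idle(void)       { return 0; }               /* drm_bo_wait */
static void mark_unfenced(void)  { }                /* move to bm->unfenced */
static int find_space(int *node) { *node = 7; return 0; } /* mem_space     */
static int do_move(int node)     { (void)node; return 0; }
static void put_node(int node)   { (void)node; }
static void back_to_lru(void)    { }

static int move_buffer(int move_unfenced)
{
	int node = -1;
	int ret = wait_idle();

	if (ret)
		return ret;

	mark_unfenced();
	ret = find_space(&node);
	if (ret == 0)
		ret = do_move(node);

	if (ret || !move_unfenced) {
		/* Unwind: drop the fresh node and rejoin the regular LRU. */
		if (node >= 0)
			put_node(node);
		back_to_lru();
	}
	return ret;
}

int main(void)
{
	printf("move_buffer -> %d\n", move_buffer(0));
	return 0;
}
```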
 
 /*
@@ -1125,98 +1341,105 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
 */
 
 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
-				      uint32_t new_flags,
 				      int move_unfenced, int no_wait)
 {
 	drm_device_t *dev = bo->dev;
 	drm_buffer_manager_t *bm = &dev->bm;
-	uint32_t flag_diff = (new_flags ^ bo->flags);
 	drm_bo_driver_t *driver = dev->driver->bo_driver;
+	int ret;
 
-	int ret;
-
-	if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
-		DRM_ERROR("Vram support not implemented yet\n");
-		return -EINVAL;
-	}
-
-	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
-	ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
+	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
+		  bo->mem.flags);
+	ret =
+	    driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
 	if (ret) {
 		DRM_ERROR("Driver did not support given buffer permissions\n");
 		return ret;
 	}
 
+	ret = drm_bo_wait_unmapped(bo, no_wait);
+	if (ret)
+		return ret;
+
+	if (bo->type == drm_bo_type_fake) {
+		ret = drm_bo_check_fake(dev, &bo->mem);
+		if (ret)
+			return ret;
+	}
+
 	/*
-	 * Move out if we need to change caching policy.
+	 * Check whether we need to move buffer.
	 */
 
-	if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
-	    !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
-		if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
-			DRM_ERROR("Cannot change caching policy of "
-				  "pinned buffer.\n");
-			return -EINVAL;
-		}
-		ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
+	if (!drm_bo_mem_compat(&bo->mem)) {
+		ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+					 move_unfenced);
 		if (ret) {
 			if (ret != -EAGAIN)
 				DRM_ERROR("Failed moving buffer.\n");
 			return ret;
 		}
 	}
-	DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
-	flag_diff = (new_flags ^ bo->flags);
 
 	/*
-	 * Check whether we dropped no_move policy, and in that case,
-	 * release reserved manager regions.
+	 * Pinned buffers.
	 */
 
-	if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
-	    !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
+	if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+		bo->pinned_mem_type = bo->mem.mem_type;
 		mutex_lock(&dev->struct_mutex);
-		if (bo->mm_node) {
-			drm_mm_put_block(bo->mm_node);
-			bo->mm_node = NULL;
+		list_del_init(&bo->pinned_lru);
+		drm_bo_add_to_pinned_lru(bo);
+
+		if (bo->pinned_node != bo->mem.mm_node) {
+			if (bo->pinned_node != NULL)
+				drm_mm_put_block(bo->pinned_node);
+			bo->pinned_node = bo->mem.mm_node;
 		}
+
 		mutex_unlock(&dev->struct_mutex);
+
+	} else if (bo->pinned_node != NULL) {
+
+		mutex_lock(&dev->struct_mutex);
+		drm_mm_put_block(bo->pinned_node);
+		list_del_init(&bo->pinned_lru);
+		bo->pinned_node = NULL;
+		mutex_unlock(&dev->struct_mutex);
+
 	}
 
 	/*
-	 * Check whether we need to move buffer.
+	 * We might need to add a TTM.
	 */
 
-	if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
-		ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
-		if (ret) {
-			if (ret != -EAGAIN)
-				DRM_ERROR("Failed moving buffer.\n");
+	if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
+		ret = drm_bo_add_ttm(bo);
+		if (ret)
 			return ret;
-		}
 	}
+	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
 
-	if (move_unfenced) {
-
-		/*
-		 * Place on unfenced list.
-		 */
+	/*
	 * Finally, adjust lru to be sure.
+	 */
+
 	mutex_lock(&dev->struct_mutex);
+	list_del(&bo->lru);
+	if (move_unfenced) {
+		list_add_tail(&bo->lru, &bm->unfenced);
 		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
 				_DRM_BO_FLAG_UNFENCED);
-		mutex_lock(&dev->struct_mutex);
-		list_del(&bo->lru);
-		list_add_tail(&bo->lru, &bm->unfenced);
-		mutex_unlock(&dev->struct_mutex);
 	} else {
-
-		mutex_lock(&dev->struct_mutex);
-		list_del_init(&bo->lru);
-		drm_bo_add_to_lru(bo, bm);
-		mutex_unlock(&dev->struct_mutex);
+		drm_bo_add_to_lru(bo);
+		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+			DRM_WAKEUP(&bo->event_queue);
+			DRM_FLAG_MASKED(bo->priv_flags, 0,
+					_DRM_BO_FLAG_UNFENCED);
+		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 
-	bo->flags = new_flags;
 	return 0;
 }
 
@@ -1225,10 +1448,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
 				  drm_bo_arg_reply_t * rep)
 {
 	drm_buffer_object_t *bo;
-	drm_device_t *dev = priv->head->dev;
 	int ret;
 	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-	uint32_t new_flags;
 
 	bo = drm_lookup_buffer_object(priv, handle, 1);
 	if (!bo) {
@@ -1241,22 +1462,20 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
 	if (ret)
 		goto out;
 
-	ret = drm_bo_new_flags(dev, bo->flags,
-			       (flags & mask) | (bo->mask & ~mask), hint,
-			       0, &new_flags, &bo->mask);
-
+	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
+	ret = drm_bo_new_mask(bo, flags, hint);
 	if (ret)
 		goto out;
 
 	ret =
-	    drm_buffer_object_validate(bo, new_flags,
-				       !(hint & DRM_BO_HINT_DONT_FENCE),
+	    drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
 				       no_wait);
 	drm_bo_fill_rep_arg(bo, rep);
 
       out:
 	mutex_unlock(&bo->mutex);
+	drm_bo_usage_deref_unlocked(bo);
 	return ret;
 }
 
@@ -1307,90 +1526,6 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
 	return ret;
 }
 
-/*
- * Call bo->mutex locked.
- */
-
-static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
-{
-	drm_device_t *dev = bo->dev;
-	drm_ttm_object_t *to = NULL;
-	int ret = 0;
-	uint32_t ttm_flags = 0;
-
-	bo->ttm_object = NULL;
-	bo->ttm = NULL;
-
-	switch (bo->type) {
-	case drm_bo_type_dc:
-		mutex_lock(&dev->struct_mutex);
-		ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
-					    ttm_flags, &to);
-		mutex_unlock(&dev->struct_mutex);
-		break;
-	case drm_bo_type_user:
-	case drm_bo_type_fake:
-		break;
-	default:
-		DRM_ERROR("Illegal buffer object type\n");
-		ret = -EINVAL;
-		break;
-	}
-
-	if (ret) {
-		return ret;
-	}
-
-	if (to) {
-		bo->ttm_object = to;
-		bo->ttm = drm_ttm_from_object(to);
-	}
-	return ret;
-}
-
-/*
- * Transfer a buffer object's memory and LRU status to a newly
- * created object. User-space references remains with the old
- * object. Call bo->mutex locked.
- */
-
-int drm_buffer_object_transfer(drm_buffer_object_t *bo,
-			       drm_buffer_object_t **new_obj)
-{
-	drm_buffer_object_t *fbo;
-	drm_device_t *dev = bo->dev;
-	drm_buffer_manager_t *bm = &dev->bm;
-
-	fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
-	if (!fbo)
-		return -ENOMEM;
-
-	*fbo = *bo;
-	mutex_init(&fbo->mutex);
-	mutex_lock(&fbo->mutex);
-	mutex_lock(&dev->struct_mutex);
-
-	INIT_LIST_HEAD(&fbo->ddestroy);
-	INIT_LIST_HEAD(&fbo->lru);
-	list_splice_init(&bo->lru, &fbo->lru);
-
-	bo->mm_node = NULL;
-	bo->ttm = NULL;
-	bo->ttm_object = NULL;
-	bo->fence = NULL;
-	bo->flags = 0;
-
-	fbo->mm_node->private = (void *)fbo;
-	atomic_set(&fbo->usage, 1);
-	atomic_inc(&bm->count);
-	mutex_unlock(&dev->struct_mutex);
-	mutex_unlock(&fbo->mutex);
-
-	*new_obj = fbo;
-	return 0;
-}
-
-
 int drm_buffer_object_create(drm_file_t * priv,
 			     unsigned long size,
 			     drm_bo_type_t type,
@@ -1404,7 +1539,6 @@ int drm_buffer_object_create(drm_file_t * priv,
 	drm_buffer_manager_t *bm = &dev->bm;
 	drm_buffer_object_t *bo;
 	int ret = 0;
-	uint32_t new_flags;
 	unsigned long num_pages;
 
 	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
@@ -1429,12 +1563,18 @@ int drm_buffer_object_create(drm_file_t * priv,
 	atomic_set(&bo->mapped, -1);
 	DRM_INIT_WAITQUEUE(&bo->event_queue);
 	INIT_LIST_HEAD(&bo->lru);
+	INIT_LIST_HEAD(&bo->pinned_lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
+#ifdef DRM_ODD_MM_COMPAT
+	INIT_LIST_HEAD(&bo->p_mm_list);
+	INIT_LIST_HEAD(&bo->vma_list);
+#endif
 	bo->dev = dev;
 	bo->type = type;
-	bo->num_pages = num_pages;
-	bo->mm_node = NULL;
-	bo->page_alignment = page_alignment;
+	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
+	bo->mem.num_pages = num_pages;
+	bo->mem.mm_node = NULL;
+	bo->mem.page_alignment = page_alignment;
 	if (bo->type == drm_bo_type_fake) {
 		bo->offset = buffer_start;
 		bo->buffer_start = 0;
@@ -1442,18 +1582,22 @@ int drm_buffer_object_create(drm_file_t * priv,
 		bo->buffer_start = buffer_start;
 	}
 	bo->priv_flags = 0;
-	bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+	bo->mem.flags = 0;
+	bo->mem.mask = 0;
 	atomic_inc(&bm->count);
-	ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
-			       1, &new_flags, &bo->mask);
-	if (ret)
-		goto out_err;
-	ret = drm_bo_add_ttm(priv, bo);
+	ret = drm_bo_new_mask(bo, mask, hint);
+
 	if (ret)
 		goto out_err;
 
-	ret = drm_buffer_object_validate(bo, new_flags, 0,
-					 hint & DRM_BO_HINT_DONT_BLOCK);
+	if (bo->type == drm_bo_type_dc) {
+		mutex_lock(&dev->struct_mutex);
+		ret = drm_bo_setup_vm_locked(bo);
+		mutex_unlock(&dev->struct_mutex);
+		if (ret)
+			goto out_err;
+	}
+
+	ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
 	if (ret)
 		goto out_err;
 
@@ -1463,6 +1607,7 @@ int drm_buffer_object_create(drm_file_t * priv,
 
       out_err:
 	mutex_unlock(&bo->mutex);
+	drm_bo_usage_deref_unlocked(bo);
 	return ret;
 }
 
@@ -1630,111 +1775,174 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
 	return 0;
 }
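drm_buffer_object_create above follows a fixed order: validate alignment, allocate and initialize the object (mutex, lists, counters), apply the requested mask, set up the mmap virtual range for drm_bo_type_dc objects, then validate, which performs the first placement; every failure path funnels through one unref. A compact sketch of that construct-or-unwind shape, with invented names standing in for the real steps:

```c
#include <stdlib.h>

struct toy_bo {
	int refs;
	/* ... mutexes, lists, memory state ... */
};

static int apply_mask(struct toy_bo *bo) { (void)bo; return 0; }
static int setup_vm(struct toy_bo *bo)   { (void)bo; return 0; }
static int validate(struct toy_bo *bo)   { (void)bo; return 0; }
static void unref(struct toy_bo *bo)     { if (--bo->refs == 0) free(bo); }

static int create_bo(struct toy_bo **out)
{
	struct toy_bo *bo = calloc(1, sizeof(*bo));
	int ret;

	if (!bo)
		return -1;
	bo->refs = 1;	/* creator's reference */

	/* Each step may fail; all failures take the same unref path. */
	if ((ret = apply_mask(bo)) || (ret = setup_vm(bo)) ||
	    (ret = validate(bo))) {
		unref(bo);	/* mirrors the out_err: label in the diff */
		return ret;
	}
	*out = bo;
	return 0;
}

int main(void)
{
	struct toy_bo *bo;

	return create_bo(&bo);
}
```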
 
+/**
+ *Clean the unfenced list and put on regular LRU.
+ *This is part of the memory manager cleanup and should only be
+ *called with the DRI lock held.
+ *Call dev->struct_sem locked.
+ */
+
+static void drm_bo_clean_unfenced(drm_device_t *dev)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	struct list_head *head, *list;
+	drm_buffer_object_t *entry;
+
+	head = &bm->unfenced;
+
+	list = head->next;
+	while(list != head) {
+		prefetch(list->next);
+		entry = list_entry(list, drm_buffer_object_t, lru);
+
+		atomic_inc(&entry->usage);
+		mutex_unlock(&dev->struct_mutex);
+		mutex_lock(&entry->mutex);
+		mutex_lock(&dev->struct_mutex);
+
+		list_del(&entry->lru);
+		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+		drm_bo_add_to_lru(entry);
+		mutex_unlock(&entry->mutex);
+		list = head->next;
+	}
+}
+
+static int drm_bo_leave_list(drm_buffer_object_t * bo,
+			     uint32_t mem_type,
+			     int free_pinned, int allow_errors)
+{
+	drm_device_t *dev = bo->dev;
+	int ret = 0;
+
+	mutex_lock(&bo->mutex);
+
+	ret = drm_bo_expire_fence(bo, allow_errors);
+	if (ret)
+		goto out;
+
+	if (free_pinned) {
+		DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
+		mutex_lock(&dev->struct_mutex);
+		list_del_init(&bo->pinned_lru);
+		if (bo->pinned_node == bo->mem.mm_node)
+			bo->pinned_node = NULL;
+		if (bo->pinned_node != NULL) {
+			drm_mm_put_block(bo->pinned_node);
+			bo->pinned_node = NULL;
+		}
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
+		DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
+			  "cleanup. Removing flag and evicting.\n");
+		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
+		bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+	}
+
+	if (bo->mem.mem_type == mem_type)
+		ret = drm_bo_evict(bo, mem_type, 0);
+
+	if (ret) {
+		if (allow_errors) {
+			goto out;
+		} else {
+			ret = 0;
+			DRM_ERROR("Cleanup eviction failed\n");
+		}
+	}
+
+      out:
+	mutex_unlock(&bo->mutex);
+	return ret;
+}
+
+
+static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
+					 int pinned_list)
+{
+	if (pinned_list)
+		return list_entry(list, drm_buffer_object_t, pinned_lru);
+	else
+		return list_entry(list, drm_buffer_object_t, lru);
+}
+
 /*
- * dev->struct_sem locked.
+ * dev->struct_mutex locked.
 */
 
 static int drm_bo_force_list_clean(drm_device_t * dev,
 				   struct list_head *head,
 				   unsigned mem_type,
-				   int force_no_move, int allow_errors)
+				   int free_pinned,
+				   int allow_errors,
+				   int pinned_list)
 {
-	drm_buffer_manager_t *bm = &dev->bm;
 	struct list_head *list, *next, *prev;
-	drm_buffer_object_t *entry;
+	drm_buffer_object_t *entry, *nentry;
 	int ret;
-	int clean;
+	int do_restart;
 
-      retry:
-	clean = 1;
+	/*
+	 * The list traversal is a bit odd here, because an item may
+	 * disappear from the list when we release the struct_mutex or
+	 * when we decrease the usage count. Also we're not guaranteed
+	 * to drain pinned lists, so we can't always restart.
+	 */
+
+restart:
+	nentry = NULL;
 	list_for_each_safe(list, next, head) {
 		prev = list->prev;
-		entry = list_entry(list, drm_buffer_object_t, lru);
+
+		entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
 		atomic_inc(&entry->usage);
-		mutex_unlock(&dev->struct_mutex);
-		mutex_lock(&entry->mutex);
-		mutex_lock(&dev->struct_mutex);
+		if (nentry) {
+			atomic_dec(&nentry->usage);
+			nentry = NULL;
+		}
 
-		if (prev != list->prev || next != list->next) {
-			mutex_unlock(&entry->mutex);
-			drm_bo_usage_deref_locked(entry);
-			goto retry;
+		/*
		 * Protect the next item from destruction, so we can check
		 * its list pointers later on.
		 */
+
+		if (next != head) {
+			nentry = drm_bo_entry(next, pinned_list);
+			atomic_inc(&nentry->usage);
 		}
 
-		if (entry->mm_node) {
-			clean = 0;
+		mutex_unlock(&dev->struct_mutex);
 
-			/*
-			 * Expire the fence.
-			 */
+		ret = drm_bo_leave_list(entry, mem_type, free_pinned,
+					allow_errors);
+		mutex_lock(&dev->struct_mutex);
 
-			mutex_unlock(&dev->struct_mutex);
-			if (entry->fence && bm->nice_mode) {
-				unsigned long _end = jiffies + 3 * DRM_HZ;
-				do {
-					ret = drm_bo_wait(entry, 0, 1, 0);
-					if (ret && allow_errors) {
-						if (ret == -EINTR)
-							ret = -EAGAIN;
-						goto out_err;
-					}
-				} while (ret && !time_after_eq(jiffies, _end));
-
-				if (entry->fence) {
-					bm->nice_mode = 0;
-					DRM_ERROR("Detected GPU hang or "
-						  "fence manager was taken down. "
-						  "Evicting waiting buffers\n");
-				}
-			}
-			if (entry->fence) {
-				drm_fence_usage_deref_unlocked(dev,
-							       entry->fence);
-				entry->fence = NULL;
-			}
+		drm_bo_usage_deref_locked(entry);
+		if (ret)
+			return ret;
 
-			DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
-				     0);
+		/*
		 * Has the next item disappeared from the list?
		 */
 
-			if (force_no_move) {
-				DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
-					     0);
-			}
-			if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
-				DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
-					  "cleanup. Removing flag and evicting.\n");
-				entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
-				entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
-			}
+		do_restart = ((next->prev != list) && (next->prev != prev));
 
-			ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
-			if (ret) {
-				if (allow_errors) {
-					goto out_err;
-				} else {
-					DRM_ERROR("Aargh. Eviction failed.\n");
-				}
-			}
-			mutex_lock(&dev->struct_mutex);
-		}
-		mutex_unlock(&entry->mutex);
-		drm_bo_usage_deref_locked(entry);
-		if (prev != list->prev || next != list->next) {
-			goto retry;
+		if (nentry != NULL && do_restart) {
+			drm_bo_usage_deref_locked(nentry);
+			nentry = NULL;
 		}
+
+		if (do_restart)
+			goto restart;
 	}
-	if (!clean)
-		goto retry;
 	return 0;
-      out_err:
-	mutex_unlock(&entry->mutex);
-	drm_bo_usage_deref_unlocked(entry);
-	mutex_lock(&dev->struct_mutex);
-	return ret;
 }
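The rewritten drm_bo_force_list_clean replaces wholesale retries with the finer trick its comment spells out: pin the *next* entry's refcount before dropping the lock, then compare its back-pointers afterwards to decide whether a restart is needed at all. A user-space approximation of "hold a reference on next so it cannot vanish while unlocked" (toy list, no real concurrency):

```c
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *prev, *next;
	int refs;		/* destruction deferred while refs > 0 */
};

static void get(struct node *n) { n->refs++; }
static void put(struct node *n) { if (--n->refs == 0) free(n); }

/* Walk the ring anchored at head, working on each entry "unlocked".
 * Pinning next keeps its link fields valid so reshuffles are detectable. */
static void clean_list(struct node *head)
{
restart:
	for (struct node *cur = head->next; cur != head;) {
		struct node *next = cur->next;
		struct node *prev = cur->prev;

		if (next != head)
			get(next);	/* protect next across the unlock */

		/* ... drop list lock, expire fences, evict, retake lock ... */

		/* Did next get unlinked or moved while we were away? */
		int do_restart = (next->prev != cur) && (next->prev != prev);

		if (next != head)
			put(next);
		if (do_restart)
			goto restart;
		cur = next;
	}
}

int main(void)
{
	struct node head = { &head, &head, 1 };

	clean_list(&head);
	puts("done");
	return 0;
}
```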
+ */ + static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) { int ret; drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem_type]; if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type); return -EINVAL; } - ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1); + drm_bo_clean_unfenced(dev); + ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); if (ret) return ret; - ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1); - if (ret) - return ret; - ret = - drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1); + ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1); + return ret; } -static int drm_bo_init_mm(drm_device_t * dev, - unsigned type, - unsigned long p_offset, unsigned long p_size) +int drm_bo_init_mm(drm_device_t * dev, + unsigned type, + unsigned long p_offset, unsigned long p_size) { drm_buffer_manager_t *bm = &dev->bm; int ret = -EINVAL; + drm_mem_type_manager_t *man; if (type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory type %d\n", type); return ret; } - if (bm->has_type[type]) { + + man = &bm->man[type]; + if (man->has_type) { DRM_ERROR("Memory manager already initialized for type %d\n", type); return ret; } + ret = dev->driver->bo_driver->init_mem_type(dev, type, man); + if (ret) + return ret; + ret = 0; if (type != DRM_BO_MEM_LOCAL) { if (!p_size) { DRM_ERROR("Zero size memory manager type %d\n", type); return ret; } - ret = drm_mm_init(&bm->manager[type], p_offset, p_size); + ret = drm_mm_init(&man->manager, p_offset, p_size); if (ret) return ret; } - bm->has_type[type] = 1; - bm->use_type[type] = 1; + man->has_type = 1; + man->use_type = 1; - INIT_LIST_HEAD(&bm->lru[type]); - INIT_LIST_HEAD(&bm->pinned[type]); + INIT_LIST_HEAD(&man->lru); + INIT_LIST_HEAD(&man->pinned); return 0; } +EXPORT_SYMBOL(drm_bo_init_mm); /* * This is called from lastclose, so we don't need to bother about @@ -1847,6 +2055,7 @@ int drm_bo_driver_finish(drm_device_t * dev) drm_buffer_manager_t *bm = &dev->bm; int ret = 0; unsigned i = DRM_BO_MEM_TYPES; + drm_mem_type_manager_t *man; mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -1856,17 +2065,19 @@ int drm_bo_driver_finish(drm_device_t * dev) bm->initialized = 0; while (i--) { - if (bm->has_type[i]) { - bm->use_type[i] = 0; + man = &bm->man[i]; + if (man->has_type) { + man->use_type = 0; if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) { ret = -EBUSY; DRM_ERROR("DRM memory manager type %d " "is not clean.\n", i); } - bm->has_type[i] = 0; + man->has_type = 0; } } mutex_unlock(&dev->struct_mutex); + if (!cancel_delayed_work(&bm->wq)) { flush_scheduled_work(); } @@ -1875,10 +2086,10 @@ int drm_bo_driver_finish(drm_device_t * dev) if (list_empty(&bm->ddestroy)) { DRM_DEBUG("Delayed destroy list was clean\n"); } - if (list_empty(&bm->lru[0])) { + if (list_empty(&bm->man[0].lru)) { DRM_DEBUG("Swap list was clean\n"); } - if (list_empty(&bm->pinned[0])) { + if (list_empty(&bm->man[0].pinned)) { DRM_DEBUG("NO_MOVE list was clean\n"); } if (list_empty(&bm->unfenced)) { @@ -2006,3 +2217,144 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } + +/* + * buffer object vm functions. 
+ */ + +int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { + if (mem->mem_type == DRM_BO_MEM_LOCAL) + return 0; + + if (man->flags & _DRM_FLAG_MEMTYPE_CMA) + return 0; + + if (mem->flags & DRM_BO_FLAG_CACHED) + return 0; + } + return 1; +} + +EXPORT_SYMBOL(drm_mem_reg_is_pci); + +/** + * \c Get the PCI offset for the buffer object memory. + * + * \param bo The buffer object. + * \param bus_base On return the base of the PCI region + * \param bus_offset On return the byte offset into the PCI region + * \param bus_size On return the byte size of the buffer object or zero if + * the buffer object memory is not accessible through a PCI region. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Otherwise returns zero. + */ + +int drm_bo_pci_offset(drm_device_t * dev, + drm_bo_mem_reg_t * mem, + unsigned long *bus_base, + unsigned long *bus_offset, unsigned long *bus_size) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + + *bus_size = 0; + if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) + return -EINVAL; + + if (drm_mem_reg_is_pci(dev, mem)) { + *bus_offset = mem->mm_node->start << PAGE_SHIFT; + *bus_size = mem->num_pages << PAGE_SHIFT; + *bus_base = man->io_offset; + } + + return 0; +} + +/** + * \c Kill all user-space virtual mappings of this buffer object. + * + * \param bo The buffer object. + * + * Call bo->mutex locked. + */ + +void drm_bo_unmap_virtual(drm_buffer_object_t * bo) +{ + drm_device_t *dev = bo->dev; + loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; + loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; + + unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); +} + +static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) +{ + drm_map_list_t *list = &bo->map_list; + drm_local_map_t *map; + drm_device_t *dev = bo->dev; + + if (list->user_token) { + drm_ht_remove_item(&dev->map_hash, &list->hash); + list->user_token = 0; + } + if (list->file_offset_node) { + drm_mm_put_block(list->file_offset_node); + list->file_offset_node = NULL; + } + + map = list->map; + if (!map) + return; + + drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); + list->map = NULL; + list->user_token = 0ULL; + drm_bo_usage_deref_locked(bo); +} + +static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) +{ + drm_map_list_t *list = &bo->map_list; + drm_local_map_t *map; + drm_device_t *dev = bo->dev; + + list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); + if (!list->map) + return -ENOMEM; + + map = list->map; + map->offset = 0; + map->type = _DRM_TTM; + map->flags = _DRM_REMOVABLE; + map->size = bo->mem.num_pages * PAGE_SIZE; + atomic_inc(&bo->usage); + map->handle = (void *)bo; + + list->file_offset_node = drm_mm_search_free(&dev->offset_manager, + bo->mem.num_pages, 0, 0); + + if (!list->file_offset_node) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->file_offset_node = drm_mm_get_block(list->file_offset_node, + bo->mem.num_pages, 0); + + list->hash.key = list->file_offset_node->start; + if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { + drm_bo_takedown_vm_locked(bo); + return -ENOMEM; + } + + list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; + + return 0; +} |