From af24465b2eddfcc5296edc830ea5ed86065a4abd Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 7 Feb 2007 12:52:23 +0100 Subject: Fix a stray unlock_kernel() in drm_vm.c Add a file for memory move helpers, drm_bo_move.c Implement generic memory move. Cached, no_move and unmapped memory temporarily broken. --- linux-core/drm_bo_move.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 linux-core/drm_bo_move.c (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c new file mode 100644 index 00000000..abfa8f80 --- /dev/null +++ b/linux-core/drm_bo_move.c @@ -0,0 +1,75 @@ +/************************************************************************** + * + * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" + +int drm_bo_move_ttm(drm_device_t *dev, + drm_ttm_t *ttm, + int evict, + int no_wait, + drm_bo_mem_reg_t *old_mem, + drm_bo_mem_reg_t *new_mem) +{ + uint32_t save_flags = old_mem->flags; + uint32_t save_mask = old_mem->mask; + int ret; + + if (old_mem->mem_type == DRM_BO_MEM_TT) { + + if (evict) + drm_ttm_evict(ttm); + else + drm_ttm_unbind(ttm); + + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(old_mem->mm_node); + mutex_unlock(&dev->struct_mutex); + save_flags |= DRM_BO_FLAG_CACHED; + + } else { + + ret = drm_bind_ttm(ttm, + new_mem->flags & DRM_BO_FLAG_BIND_CACHED, + new_mem->mm_node->start); + if (ret) + return ret; + + if (!(new_mem->flags & DRM_BO_FLAG_BIND_CACHED)) { + save_flags &= ~DRM_BO_FLAG_CACHED; + } + + } + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_MASK_VAL(save_flags, new_mem->flags, DRM_BO_MASK_MEM); + return 0; +} -- cgit v1.2.3 From c1fbd8a56653b91af57a408bbcf20a760a2bd8c8 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 7 Feb 2007 17:25:13 +0100 Subject: Checkpoint commit. Flag handling and memory type selection cleanup. glxgears won't start. 
--- linux-core/drm_bo_move.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index abfa8f80..b4486bfe 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -50,26 +50,25 @@ int drm_bo_move_ttm(drm_device_t *dev, mutex_lock(&dev->struct_mutex); drm_mm_put_block(old_mem->mm_node); + old_mem->mm_node = NULL; mutex_unlock(&dev->struct_mutex); - save_flags |= DRM_BO_FLAG_CACHED; - - } else { - + DRM_FLAG_MASKED(old_mem->flags, + DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); + old_mem->mem_type = DRM_BO_MEM_LOCAL; + save_flags = old_mem->flags; + } + if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { ret = drm_bind_ttm(ttm, - new_mem->flags & DRM_BO_FLAG_BIND_CACHED, + new_mem->flags & DRM_BO_FLAG_CACHED, new_mem->mm_node->start); if (ret) return ret; - - if (!(new_mem->flags & DRM_BO_FLAG_BIND_CACHED)) { - save_flags &= ~DRM_BO_FLAG_CACHED; - } - } *old_mem = *new_mem; new_mem->mm_node = NULL; old_mem->mask = save_mask; - DRM_MASK_VAL(save_flags, new_mem->flags, DRM_BO_MASK_MEM); + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } -- cgit v1.2.3 From 1257907fa9a24de7aa95485e1b3ab509fdc4d4e6 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 13:29:08 +0100 Subject: Simplify external ttm page allocation. Implement a memcpy fallback for copying between buffers. --- linux-core/drm_bo_move.c | 177 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 177 insertions(+) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index b4486bfe..23e8c0f2 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -72,3 +72,180 @@ int drm_bo_move_ttm(drm_device_t *dev, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } + + +/** + * \c Return a kernel virtual address to the buffer object PCI memory. + * + * \param bo The buffer object. + * \return Failure indication. + * + * Returns -EINVAL if the buffer object is currently not mappable. + * Returns -ENOMEM if the ioremap operation failed. + * Otherwise returns zero. + * + * After a successfull call, bo->iomap contains the virtual address, or NULL + * if the buffer object content is not accessible through PCI space. + * Call bo->mutex locked. + */ + + +int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual) +{ + drm_buffer_manager_t *bm = &dev->bm; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + unsigned long bus_offset; + unsigned long bus_size; + unsigned long bus_base; + int ret; + void *addr; + + *virtual = NULL; + ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size); + if (ret || bus_size == 0) + return ret; + + if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + addr = (void *) (((u8 *)man->io_addr) + bus_offset); + else { + addr = ioremap_nocache(bus_base + bus_offset, bus_size); + if (!addr) + return -ENOMEM; + } + *virtual = addr; + return 0; +} + + +/** + * \c Unmap mapping obtained using drm_bo_ioremap + * + * \param bo The buffer object. + * + * Call bo->mutex locked. 
+ */ + +void drm_mem_reg_iounmap(drm_device_t *dev, drm_bo_mem_reg_t *mem, + void *virtual) +{ + drm_buffer_manager_t *bm; + drm_mem_type_manager_t *man; + + + bm = &dev->bm; + man = &bm->man[mem->mem_type]; + + if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + iounmap(virtual); +} + + +static int drm_copy_io_page(void *dst, void *src, unsigned long page) +{ + uint32_t *dstP = (uint32_t *)((unsigned long) dst + (page << PAGE_SHIFT)); + uint32_t *srcP = (uint32_t *)((unsigned long) src + (page << PAGE_SHIFT)); + + int i; + for (i=0; i < PAGE_SIZE / sizeof(uint32_t); ++i) + iowrite32(ioread32(srcP++), dstP++); + return 0; +} + +static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) +{ + struct page *d = drm_ttm_get_page(ttm, page); + void *dst; + + if (!d) + return -ENOMEM; + + src = (void *)((unsigned long) src + (page << PAGE_SHIFT)); + dst = kmap(d); + if (!dst) + return -ENOMEM; + + memcpy_fromio(dst, src, PAGE_SIZE); + kunmap(dst); + return 0; +} + +static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) +{ + struct page *s = drm_ttm_get_page(ttm, page); + void *src; + + if (!s) + return -ENOMEM; + + dst = (void *)((unsigned long) dst + (page << PAGE_SHIFT)); + src = kmap(s); + if (!src) + return -ENOMEM; + + memcpy_toio(dst, src, PAGE_SIZE); + kunmap(src); + return 0; +} + + +int drm_bo_move_memcpy(drm_device_t *dev, + drm_ttm_t *ttm, + int evict, + int no_wait, + drm_bo_mem_reg_t *old_mem, + drm_bo_mem_reg_t *new_mem) +{ + void *old_iomap; + void *new_iomap; + int ret; + uint32_t save_flags = old_mem->flags; + uint32_t save_mask = old_mem->mask; + unsigned long i; + unsigned long page; + unsigned long add = 0; + int dir; + + + ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); + if (ret) + return ret; + ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap); + if (ret) + goto out; + + if (old_iomap == NULL && new_iomap == NULL) + goto out2; + + add = 0; + dir = 1; + + if ((old_mem->mem_type == new_mem->mem_type) && + (new_mem->mm_node->start < + old_mem->mm_node->start + old_mem->mm_node->size)) { + dir = -1; + add = new_mem->num_pages - 1; + } + + for (i=0; i < new_mem->num_pages; ++i) { + page = i*dir + add; + if (old_iomap == NULL) + ret = drm_copy_ttm_io_page(ttm, new_iomap, page); + else if (new_iomap == NULL) + ret = drm_copy_io_ttm_page(ttm, old_iomap, page); + else + ret = drm_copy_io_page(new_iomap, old_iomap, page); + if (ret) + goto out1; + } + +out2: + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); +out1: + drm_mem_reg_iounmap(dev, new_mem, &new_iomap); +out: + drm_mem_reg_iounmap(dev, old_mem, old_iomap); + return ret; +} -- cgit v1.2.3 From e4b2da440699f581a8779ea8cb9e99e4c903e6a7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 16:21:38 +0100 Subject: A minor function interface change and some memcpy bugfixing. Hooray!! it sort of works with a fixed AGP area as faked VRAM. 
--- linux-core/drm_bo_move.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 23e8c0f2..b7a49299 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -30,13 +30,14 @@ #include "drmP.h" -int drm_bo_move_ttm(drm_device_t *dev, - drm_ttm_t *ttm, +int drm_bo_move_ttm(drm_buffer_object_t *bo, int evict, int no_wait, - drm_bo_mem_reg_t *old_mem, drm_bo_mem_reg_t *new_mem) { + drm_device_t *dev = bo->dev; + drm_ttm_t *ttm = bo->ttm; + drm_bo_mem_reg_t *old_mem = &bo->mem; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; int ret; @@ -135,8 +136,9 @@ void drm_mem_reg_iounmap(drm_device_t *dev, drm_bo_mem_reg_t *mem, bm = &dev->bm; man = &bm->man[mem->mem_type]; - if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) + if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { iounmap(virtual); + } } @@ -188,13 +190,16 @@ static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) } -int drm_bo_move_memcpy(drm_device_t *dev, - drm_ttm_t *ttm, +int drm_bo_move_memcpy(drm_buffer_object_t *bo, int evict, int no_wait, - drm_bo_mem_reg_t *old_mem, drm_bo_mem_reg_t *new_mem) { + drm_device_t *dev = bo->dev; + drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; + drm_ttm_t *ttm = bo->ttm; + drm_bo_mem_reg_t *old_mem = &bo->mem; + drm_bo_mem_reg_t old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -205,7 +210,6 @@ int drm_bo_move_memcpy(drm_device_t *dev, unsigned long add = 0; int dir; - ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); if (ret) return ret; @@ -237,15 +241,22 @@ int drm_bo_move_memcpy(drm_device_t *dev, if (ret) goto out1; } - + mb(); out2: *old_mem = *new_mem; new_mem->mm_node = NULL; old_mem->mask = save_mask; DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); + + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) { + drm_ttm_unbind(ttm); + drm_destroy_ttm(ttm); + bo->ttm = NULL; + } + out1: - drm_mem_reg_iounmap(dev, new_mem, &new_iomap); + drm_mem_reg_iounmap(dev, new_mem, new_iomap); out: - drm_mem_reg_iounmap(dev, old_mem, old_iomap); + drm_mem_reg_iounmap(dev, &old_copy, old_iomap); return ret; } -- cgit v1.2.3 From bf8f46d4c64eb5b66814223f7e5ddb8d8e7a555e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 18:59:02 +0100 Subject: Fix mm_block leak. Some other minor fixes. --- linux-core/drm_bo_move.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index b7a49299..4ed3392d 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -243,6 +243,11 @@ int drm_bo_move_memcpy(drm_buffer_object_t *bo, } mb(); out2: + if (old_mem->mm_node) { + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(old_mem->mm_node); + mutex_unlock(&dev->struct_mutex); + } *old_mem = *new_mem; new_mem->mm_node = NULL; old_mem->mask = save_mask; -- cgit v1.2.3 From a0ed808d05a7965366e329a6e8f4e538350b9c23 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 19:06:39 +0100 Subject: Don't create a ttm just to copy from. 
--- linux-core/drm_bo_move.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 4ed3392d..9bfb3ef1 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -219,6 +219,8 @@ int drm_bo_move_memcpy(drm_buffer_object_t *bo, if (old_iomap == NULL && new_iomap == NULL) goto out2; + if (old_iomap == NULL && ttm == NULL) + goto out2; add = 0; dir = 1; -- cgit v1.2.3 From b2bcbf874b0f26ca0c490fb0453bef64ce6d9dd7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 8 Feb 2007 21:28:33 +0100 Subject: Add an accelerated buffer copy cleanup helper. Export helper functions and make some important buffer-object functions non-static. Add an i915 accelerated blit buffer move for pci memory buffers. --- linux-core/drm_bo_move.c | 119 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 9bfb3ef1..d2c44501 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -73,6 +73,7 @@ int drm_bo_move_ttm(drm_buffer_object_t *bo, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } +EXPORT_SYMBOL(drm_bo_move_ttm); /** @@ -267,3 +268,121 @@ out: drm_mem_reg_iounmap(dev, &old_copy, old_iomap); return ret; } +EXPORT_SYMBOL(drm_bo_move_memcpy); + +/* + * Transfer a buffer object's memory and LRU status to a newly + * created object. User-space references remains with the old + * object. Call bo->mutex locked. + */ + +int drm_buffer_object_transfer(drm_buffer_object_t *bo, + drm_buffer_object_t **new_obj) +{ + drm_buffer_object_t *fbo; + drm_device_t *dev = bo->dev; + drm_buffer_manager_t *bm = &dev->bm; + + fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); + if (!fbo) + return -ENOMEM; + + *fbo = *bo; + mutex_init(&fbo->mutex); + mutex_lock(&fbo->mutex); + mutex_lock(&dev->struct_mutex); + + INIT_LIST_HEAD(&fbo->ddestroy); + INIT_LIST_HEAD(&fbo->lru); + + bo->mem.mm_node = NULL; + bo->ttm = NULL; + atomic_inc(&bo->fence->usage); + bo->mem.flags = 0; + + fbo->mem.mm_node->private = (void *)fbo; + atomic_set(&fbo->usage, 1); + atomic_inc(&bm->count); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&fbo->mutex); + + *new_obj = fbo; + return 0; +} + +int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, + int evict, + int no_wait, + uint32_t fence_type, + uint32_t fence_flags, + drm_bo_mem_reg_t *new_mem) +{ + drm_device_t *dev = bo->dev; + drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; + drm_bo_mem_reg_t *old_mem = &bo->mem; + int ret; + uint32_t save_flags = old_mem->flags; + uint32_t save_mask = old_mem->mask; + drm_buffer_object_t *old_obj; + + if (bo->fence) + drm_fence_usage_deref_unlocked(dev, bo->fence); + + ret = drm_fence_object_create(dev, fence_type, + fence_flags | DRM_FENCE_FLAG_EMIT, + &bo->fence); + if (ret) + return ret; + + if (evict) { + ret = drm_bo_wait(bo, 0, 1, 0); + if (ret) + return ret; + if (old_mem->mm_node) { + mutex_lock(&dev->struct_mutex); + drm_mm_put_block(old_mem->mm_node); + old_mem->mm_node = NULL; + mutex_unlock(&dev->struct_mutex); + } + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && + (bo->ttm != NULL)) { + drm_ttm_unbind(bo->ttm); + drm_destroy_ttm(bo->ttm); + bo->ttm = NULL; + } + } else { + + /* This should help pipeline ordinary buffer moves. 
+ * + * Hang old buffer memory on a new buffer object, + * and leave it to be released when the blit + * operation has completed. + */ + + ret = drm_buffer_object_transfer(bo, &old_obj); + if (ret) + return ret; + + if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) + old_obj->ttm = NULL; + else + bo->ttm = NULL; + + atomic_inc(&old_obj->fence->usage); + mutex_lock(&dev->struct_mutex); + list_del(&old_obj->lru); + drm_bo_add_to_lru(old_obj, &old_obj->dev->bm); + drm_bo_usage_deref_locked(old_obj); + mutex_unlock(&dev->struct_mutex); + + } + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + old_mem->mask = save_mask; + DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); + return 0; +} +EXPORT_SYMBOL(drm_bo_move_accel_cleanup); + + -- cgit v1.2.3 From 6a49d9a8abd9f168211017c2d585d0d64e89c530 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 00:02:02 +0100 Subject: Fix evict_mutex locking range. Implement unmappable buffers. (fault moves them to mappable when needed). Various bugfixes. --- linux-core/drm_bo_move.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index d2c44501..53f7fea8 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -295,10 +295,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); - bo->mem.mm_node = NULL; - bo->ttm = NULL; atomic_inc(&bo->fence->usage); - bo->mem.flags = 0; fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); @@ -355,7 +352,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, /* This should help pipeline ordinary buffer moves. * * Hang old buffer memory on a new buffer object, - * and leave it to be released when the blit + * and leave it to be released when the GPU * operation has completed. */ -- cgit v1.2.3 From 99acdaee482fc8a2fc6718317e2f546401e93739 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 00:07:29 +0100 Subject: Fix copyright statements. --- linux-core/drm_bo_move.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 53f7fea8..e1340205 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a -- cgit v1.2.3 From 53aee3122a1821b8ca24ed2bc5c1940cb0f2ff8e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 9 Feb 2007 16:36:53 +0100 Subject: I915 accelerated blit copy functional. Fixed - to System memory copies are implemented by flipping in a cache-coherent TTM, blitting to it, and then flipping it out. 
--- linux-core/drm_bo_move.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index e1340205..d712a70f 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -333,8 +333,10 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, if (evict) { ret = drm_bo_wait(bo, 0, 1, 0); - if (ret) + if (ret) { + DRM_ERROR("Wait failure\n"); return ret; + } if (old_mem->mm_node) { mutex_lock(&dev->struct_mutex); drm_mm_put_block(old_mem->mm_node); -- cgit v1.2.3 From 85ee2a8d044cd4d8de4894a794151af9471648e3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 10 Feb 2007 12:06:36 +0100 Subject: Various bugfixes. --- linux-core/drm_bo_move.c | 40 ++++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 10 deletions(-) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index d712a70f..3347f945 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -168,7 +168,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) return -ENOMEM; memcpy_fromio(dst, src, PAGE_SIZE); - kunmap(dst); + kunmap(d); return 0; } @@ -186,7 +186,7 @@ static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) return -ENOMEM; memcpy_toio(dst, src, PAGE_SIZE); - kunmap(src); + kunmap(s); return 0; } @@ -283,7 +283,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; - fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); + fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); if (!fbo) return -ENOMEM; @@ -292,11 +292,15 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, mutex_lock(&fbo->mutex); mutex_lock(&dev->struct_mutex); + DRM_INIT_WAITQUEUE(&bo->event_queue); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); +#ifdef DRM_ODD_MM_COMPAT + INIT_LIST_HEAD(&fbo->vma_list); + INIT_LIST_HEAD(&fbo->p_mm_list); +#endif atomic_inc(&bo->fence->usage); - fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); atomic_inc(&bm->count); @@ -307,6 +311,11 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, return 0; } +/* + * Since move is underway, we need to block signals in this function. + * We cannot restart until it has finished. + */ + int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, int evict, int no_wait, @@ -324,19 +333,29 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, if (bo->fence) drm_fence_usage_deref_unlocked(dev, bo->fence); - ret = drm_fence_object_create(dev, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &bo->fence); if (ret) return ret; - if (evict) { +#ifdef DRM_ODD_MM_COMPAT + /* + * In this mode, we don't allow pipelining a copy blit, + * since the buffer will be accessible from user space + * the moment we return and rebuild the page tables. + * + * With normal vm operation, page tables are rebuilt + * on demand using fault(), which waits for buffer idle. 
+ */ + if (1) +#else + if (evict) +#endif + { ret = drm_bo_wait(bo, 0, 1, 0); - if (ret) { - DRM_ERROR("Wait failure\n"); + if (ret) return ret; - } if (old_mem->mm_node) { mutex_lock(&dev->struct_mutex); drm_mm_put_block(old_mem->mm_node); @@ -359,6 +378,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, */ ret = drm_buffer_object_transfer(bo, &old_obj); + if (ret) return ret; @@ -367,9 +387,9 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, else bo->ttm = NULL; - atomic_inc(&old_obj->fence->usage); mutex_lock(&dev->struct_mutex); list_del(&old_obj->lru); + DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); drm_bo_add_to_lru(old_obj, &old_obj->dev->bm); drm_bo_usage_deref_locked(old_obj); mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From f02f83ee08a2bb87700544a9b67f475532e84af4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 17:47:57 +0100 Subject: Cleanup and fix support for pinned buffers. --- linux-core/drm_bo_move.c | 53 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 18 deletions(-) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 3347f945..c6fe4ec2 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -30,12 +30,31 @@ #include "drmP.h" + +/** + * Free the old memory node unless it's a pinned region and we + * have not been requested to free also pinned regions. + */ + +static void drm_bo_free_old_node(drm_buffer_object_t *bo) +{ + drm_bo_mem_reg_t *old_mem = &bo->mem; + + if (old_mem->mm_node && + (old_mem->mm_node != bo->pinned_node)) { + mutex_lock(&bo->dev->struct_mutex); + drm_mm_put_block(old_mem->mm_node); + old_mem->mm_node = NULL; + mutex_unlock(&bo->dev->struct_mutex); + } + old_mem->mm_node = NULL; +} + int drm_bo_move_ttm(drm_buffer_object_t *bo, int evict, int no_wait, drm_bo_mem_reg_t *new_mem) { - drm_device_t *dev = bo->dev; drm_ttm_t *ttm = bo->ttm; drm_bo_mem_reg_t *old_mem = &bo->mem; uint32_t save_flags = old_mem->flags; @@ -49,10 +68,7 @@ int drm_bo_move_ttm(drm_buffer_object_t *bo, else drm_ttm_unbind(ttm); - mutex_lock(&dev->struct_mutex); - drm_mm_put_block(old_mem->mm_node); - old_mem->mm_node = NULL; - mutex_unlock(&dev->struct_mutex); + drm_bo_free_old_node(bo); DRM_FLAG_MASKED(old_mem->flags, DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); @@ -246,11 +262,8 @@ int drm_bo_move_memcpy(drm_buffer_object_t *bo, } mb(); out2: - if (old_mem->mm_node) { - mutex_lock(&dev->struct_mutex); - drm_mm_put_block(old_mem->mm_node); - mutex_unlock(&dev->struct_mutex); - } + drm_bo_free_old_node(bo); + *old_mem = *new_mem; new_mem->mm_node = NULL; old_mem->mask = save_mask; @@ -295,12 +308,14 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, DRM_INIT_WAITQUEUE(&bo->event_queue); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); + INIT_LIST_HEAD(&fbo->pinned_lru); #ifdef DRM_ODD_MM_COMPAT INIT_LIST_HEAD(&fbo->vma_list); INIT_LIST_HEAD(&fbo->p_mm_list); #endif atomic_inc(&bo->fence->usage); + fbo->pinned_node = NULL; fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); atomic_inc(&bm->count); @@ -356,12 +371,9 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, ret = drm_bo_wait(bo, 0, 1, 0); if (ret) return ret; - if (old_mem->mm_node) { - mutex_lock(&dev->struct_mutex); - drm_mm_put_block(old_mem->mm_node); - old_mem->mm_node = NULL; - mutex_unlock(&dev->struct_mutex); - } + + drm_bo_free_old_node(bo); + if ((man->flags & 
_DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) { drm_ttm_unbind(bo->ttm); @@ -388,9 +400,14 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, bo->ttm = NULL; mutex_lock(&dev->struct_mutex); - list_del(&old_obj->lru); + list_del_init(&old_obj->lru); DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - drm_bo_add_to_lru(old_obj, &old_obj->dev->bm); + + if (old_obj->mem.mm_node == bo->pinned_node) + old_obj->mem.mm_node = NULL; + else + drm_bo_add_to_lru(old_obj); + drm_bo_usage_deref_locked(old_obj); mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From b0c5339ed69c6ff08b7817f870e895aae2ef04c7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 20:32:03 +0100 Subject: More bugfixes. --- linux-core/drm_bo_move.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index c6fe4ec2..1d142087 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -365,7 +365,8 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, */ if (1) #else - if (evict) + if (evict || ((bo->mem.mm_node == bo->pinned_node) && + bo->mem.mm_node != NULL)) #endif { ret = drm_bo_wait(bo, 0, 1, 0); @@ -402,11 +403,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, mutex_lock(&dev->struct_mutex); list_del_init(&old_obj->lru); DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - - if (old_obj->mem.mm_node == bo->pinned_node) - old_obj->mem.mm_node = NULL; - else - drm_bo_add_to_lru(old_obj); + drm_bo_add_to_lru(old_obj); drm_bo_usage_deref_locked(old_obj); mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From 398913dc0e632c71e3095a7d50dae911aed18884 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 12 Feb 2007 20:34:50 +0100 Subject: Lindent. --- linux-core/drm_bo_move.c | 122 ++++++++++++++++++++++------------------------- 1 file changed, 56 insertions(+), 66 deletions(-) (limited to 'linux-core/drm_bo_move.c') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 1d142087..7e195125 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -30,18 +30,16 @@ #include "drmP.h" - /** * Free the old memory node unless it's a pinned region and we * have not been requested to free also pinned regions. 
*/ -static void drm_bo_free_old_node(drm_buffer_object_t *bo) +static void drm_bo_free_old_node(drm_buffer_object_t * bo) { drm_bo_mem_reg_t *old_mem = &bo->mem; - if (old_mem->mm_node && - (old_mem->mm_node != bo->pinned_node)) { + if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { mutex_lock(&bo->dev->struct_mutex); drm_mm_put_block(old_mem->mm_node); old_mem->mm_node = NULL; @@ -50,10 +48,8 @@ static void drm_bo_free_old_node(drm_buffer_object_t *bo) old_mem->mm_node = NULL; } -int drm_bo_move_ttm(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem) +int drm_bo_move_ttm(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { drm_ttm_t *ttm = bo->ttm; drm_bo_mem_reg_t *old_mem = &bo->mem; @@ -69,15 +65,15 @@ int drm_bo_move_ttm(drm_buffer_object_t *bo, drm_ttm_unbind(ttm); drm_bo_free_old_node(bo); - DRM_FLAG_MASKED(old_mem->flags, - DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | - DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); + DRM_FLAG_MASKED(old_mem->flags, + DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE | + DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE); old_mem->mem_type = DRM_BO_MEM_LOCAL; save_flags = old_mem->flags; - } + } if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { - ret = drm_bind_ttm(ttm, - new_mem->flags & DRM_BO_FLAG_CACHED, + ret = drm_bind_ttm(ttm, + new_mem->flags & DRM_BO_FLAG_CACHED, new_mem->mm_node->start); if (ret) return ret; @@ -89,8 +85,8 @@ int drm_bo_move_ttm(drm_buffer_object_t *bo, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } -EXPORT_SYMBOL(drm_bo_move_ttm); +EXPORT_SYMBOL(drm_bo_move_ttm); /** * \c Return a kernel virtual address to the buffer object PCI memory. @@ -107,11 +103,11 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. */ - -int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual) +int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem, + void **virtual) { drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; unsigned long bus_offset; unsigned long bus_size; unsigned long bus_base; @@ -120,11 +116,11 @@ int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual *virtual = NULL; ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size); - if (ret || bus_size == 0) + if (ret || bus_size == 0) return ret; if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) - addr = (void *) (((u8 *)man->io_addr) + bus_offset); + addr = (void *)(((u8 *) man->io_addr) + bus_offset); else { addr = ioremap_nocache(bus_base + bus_offset, bus_size); if (!addr) @@ -134,7 +130,6 @@ int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual return 0; } - /** * \c Unmap mapping obtained using drm_bo_ioremap * @@ -143,34 +138,34 @@ int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem, void **virtual * Call bo->mutex locked. 
*/ -void drm_mem_reg_iounmap(drm_device_t *dev, drm_bo_mem_reg_t *mem, +void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem, void *virtual) { - drm_buffer_manager_t *bm; - drm_mem_type_manager_t *man; - + drm_buffer_manager_t *bm; + drm_mem_type_manager_t *man; bm = &dev->bm; man = &bm->man[mem->mem_type]; - + if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { iounmap(virtual); } } - static int drm_copy_io_page(void *dst, void *src, unsigned long page) { - uint32_t *dstP = (uint32_t *)((unsigned long) dst + (page << PAGE_SHIFT)); - uint32_t *srcP = (uint32_t *)((unsigned long) src + (page << PAGE_SHIFT)); + uint32_t *dstP = + (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT)); + uint32_t *srcP = + (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT)); int i; - for (i=0; i < PAGE_SIZE / sizeof(uint32_t); ++i) + for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i) iowrite32(ioread32(srcP++), dstP++); return 0; } -static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) +static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) { struct page *d = drm_ttm_get_page(ttm, page); void *dst; @@ -178,7 +173,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) if (!d) return -ENOMEM; - src = (void *)((unsigned long) src + (page << PAGE_SHIFT)); + src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); dst = kmap(d); if (!dst) return -ENOMEM; @@ -188,7 +183,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page) return 0; } -static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) +static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) { struct page *s = drm_ttm_get_page(ttm, page); void *src; @@ -196,7 +191,7 @@ static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) if (!s) return -ENOMEM; - dst = (void *)((unsigned long) dst + (page << PAGE_SHIFT)); + dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); src = kmap(s); if (!src) return -ENOMEM; @@ -206,11 +201,8 @@ static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page) return 0; } - -int drm_bo_move_memcpy(drm_buffer_object_t *bo, - int evict, - int no_wait, - drm_bo_mem_reg_t *new_mem) +int drm_bo_move_memcpy(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { drm_device_t *dev = bo->dev; drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; @@ -226,42 +218,42 @@ int drm_bo_move_memcpy(drm_buffer_object_t *bo, unsigned long page; unsigned long add = 0; int dir; - + ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap); if (ret) return ret; ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap); - if (ret) + if (ret) goto out; if (old_iomap == NULL && new_iomap == NULL) goto out2; if (old_iomap == NULL && ttm == NULL) goto out2; - + add = 0; dir = 1; - if ((old_mem->mem_type == new_mem->mem_type) && - (new_mem->mm_node->start < - old_mem->mm_node->start + old_mem->mm_node->size)) { + if ((old_mem->mem_type == new_mem->mem_type) && + (new_mem->mm_node->start < + old_mem->mm_node->start + old_mem->mm_node->size)) { dir = -1; add = new_mem->num_pages - 1; } - for (i=0; i < new_mem->num_pages; ++i) { - page = i*dir + add; - if (old_iomap == NULL) + for (i = 0; i < new_mem->num_pages; ++i) { + page = i * dir + add; + if (old_iomap == NULL) ret = drm_copy_ttm_io_page(ttm, new_iomap, page); else if (new_iomap == NULL) ret = drm_copy_io_ttm_page(ttm, old_iomap, page); - else + else ret = 
drm_copy_io_page(new_iomap, old_iomap, page); if (ret) goto out1; } mb(); -out2: + out2: drm_bo_free_old_node(bo); *old_mem = *new_mem; @@ -275,12 +267,13 @@ out2: bo->ttm = NULL; } -out1: + out1: drm_mem_reg_iounmap(dev, new_mem, new_iomap); -out: + out: drm_mem_reg_iounmap(dev, &old_copy, old_iomap); return ret; } + EXPORT_SYMBOL(drm_bo_move_memcpy); /* @@ -289,8 +282,8 @@ EXPORT_SYMBOL(drm_bo_move_memcpy); * object. Call bo->mutex locked. */ -int drm_buffer_object_transfer(drm_buffer_object_t *bo, - drm_buffer_object_t **new_obj) +int drm_buffer_object_transfer(drm_buffer_object_t * bo, + drm_buffer_object_t ** new_obj) { drm_buffer_object_t *fbo; drm_device_t *dev = bo->dev; @@ -299,7 +292,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); if (!fbo) return -ENOMEM; - + *fbo = *bo; mutex_init(&fbo->mutex); mutex_lock(&fbo->mutex); @@ -331,12 +324,11 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo, * We cannot restart until it has finished. */ -int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, +int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, int evict, int no_wait, uint32_t fence_type, - uint32_t fence_flags, - drm_bo_mem_reg_t *new_mem) + uint32_t fence_flags, drm_bo_mem_reg_t * new_mem) { drm_device_t *dev = bo->dev; drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; @@ -345,7 +337,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; drm_buffer_object_t *old_obj; - + if (bo->fence) drm_fence_usage_deref_unlocked(dev, bo->fence); ret = drm_fence_object_create(dev, fence_type, @@ -365,18 +357,17 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, */ if (1) #else - if (evict || ((bo->mem.mm_node == bo->pinned_node) && + if (evict || ((bo->mem.mm_node == bo->pinned_node) && bo->mem.mm_node != NULL)) #endif { ret = drm_bo_wait(bo, 0, 1, 0); - if (ret) + if (ret) return ret; drm_bo_free_old_node(bo); - if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && - (bo->ttm != NULL)) { + if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) { drm_ttm_unbind(bo->ttm); drm_destroy_ttm(bo->ttm); bo->ttm = NULL; @@ -404,7 +395,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, list_del_init(&old_obj->lru); DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); drm_bo_add_to_lru(old_obj); - + drm_bo_usage_deref_locked(old_obj); mutex_unlock(&dev->struct_mutex); @@ -416,6 +407,5 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo, DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE); return 0; } -EXPORT_SYMBOL(drm_bo_move_accel_cleanup); - +EXPORT_SYMBOL(drm_bo_move_accel_cleanup); -- cgit v1.2.3 From 9efdae317ce01cea95f75855b175243ae858fde4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 13 Feb 2007 20:05:32 +0100 Subject: More bugfixes. Fixed memory, pinned buffers and unmappable memory now seems fully functional. 
---
 linux-core/drm_bo_move.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'linux-core/drm_bo_move.c')

diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 7e195125..21f5f6cc 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -58,7 +58,6 @@ int drm_bo_move_ttm(drm_buffer_object_t * bo,
 	int ret;
 
 	if (old_mem->mem_type == DRM_BO_MEM_TT) {
-
 		if (evict)
 			drm_ttm_evict(ttm);
 		else
-- cgit v1.2.3

From 7766378d97323de375687285f2e125008b79045d Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Thu, 15 Feb 2007 12:10:33 +0100
Subject: Initial support for fence object classes. (Fence objects belonging to different command submission mechanisms).

---
 linux-core/drm_bo_move.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'linux-core/drm_bo_move.c')

diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 21f5f6cc..4f752065 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -326,6 +326,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo,
 int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
 			      int evict,
 			      int no_wait,
+			      uint32_t fence_class,
 			      uint32_t fence_type,
 			      uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
 {
@@ -339,7 +340,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
 
 	if (bo->fence)
 		drm_fence_usage_deref_unlocked(dev, bo->fence);
-	ret = drm_fence_object_create(dev, fence_type,
+	ret = drm_fence_object_create(dev, fence_class, fence_type,
 				      fence_flags | DRM_FENCE_FLAG_EMIT,
 				      &bo->fence);
 	if (ret)
-- cgit v1.2.3
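
A note on the dir/add arithmetic in drm_bo_move_memcpy() above: it is the classic memmove-style ordering. When the old and new regions live in the same memory type and can overlap, the pages are copied back to front so that no page is overwritten before it has been read. The standalone user-space sketch below illustrates only that ordering; the aperture array, the copy_page() helper and the page counts are invented for the illustration and are not part of the DRM code.

/*
 * Illustrative sketch (not kernel code): overlap-safe page copy order,
 * mirroring the dir/add logic of drm_bo_move_memcpy().
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define APERTURE_PAGES 16UL

static unsigned char aperture[APERTURE_PAGES * PAGE_SIZE]; /* fake linear memory */

/* Copy one whole page inside the fake aperture. */
static void copy_page(unsigned long dst_page, unsigned long src_page)
{
	memcpy(aperture + dst_page * PAGE_SIZE,
	       aperture + src_page * PAGE_SIZE, PAGE_SIZE);
}

/* Move num_pages pages from old_start to new_start, handling overlap. */
static void move_pages(unsigned long old_start, unsigned long new_start,
		       unsigned long num_pages)
{
	long dir = 1;
	long add = 0;
	long i, page;

	/*
	 * If the destination starts below the end of the source, walk the
	 * pages backwards, like the dir/add setup in drm_bo_move_memcpy().
	 */
	if (new_start < old_start + num_pages) {
		dir = -1;
		add = (long)num_pages - 1;
	}

	for (i = 0; i < (long)num_pages; ++i) {
		page = i * dir + add;
		copy_page(new_start + page, old_start + page);
	}
}

int main(void)
{
	memset(aperture, 0xaa, sizeof(aperture));
	move_pages(2, 3, 4);	/* overlapping move, copied in reverse order */
	printf("moved 4 pages\n");
	return 0;
}

With dir = -1 and add = num_pages - 1 the loop simply visits the pages in descending order; non-overlapping moves keep the forward order, so the same loop also serves copies between different memory types.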