author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>    2007-02-08 13:29:08 +0100
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>    2007-02-08 13:29:08 +0100
commit     1257907fa9a24de7aa95485e1b3ab509fdc4d4e6 (patch)
tree       a73a3d646917f4df5192bfe3bb23805e33d583fd /linux-core/drm_bo_move.c
parent     09984ad77bdeca0e9d87b1fe2be1489205fda297 (diff)
Simplify external ttm page allocation.
Implement a memcpy fallback for copying between buffers.
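
To illustrate the second point, a driver-side move hook could try an
accelerated copy first and fall back to the new CPU path. A minimal sketch,
assuming a hypothetical hardware blit drm_example_blit(); only
drm_bo_move_memcpy() below is provided by this patch:

	static int drm_example_move(drm_device_t *dev, drm_ttm_t *ttm,
				    int evict, int no_wait,
				    drm_bo_mem_reg_t *old_mem,
				    drm_bo_mem_reg_t *new_mem)
	{
		/* drm_example_blit() is hypothetical: a hardware copy
		 * that may fail, e.g. when the engine is unavailable. */
		if (drm_example_blit(dev, old_mem, new_mem) == 0)
			return 0;

		/* Fall back to the CPU copy added by this patch. */
		return drm_bo_move_memcpy(dev, ttm, evict, no_wait,
					  old_mem, new_mem);
	}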
Diffstat (limited to 'linux-core/drm_bo_move.c')
-rw-r--r--  linux-core/drm_bo_move.c | 177
1 file changed, 177 insertions(+), 0 deletions(-)
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index b4486bfe..23e8c0f2 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -72,3 +72,180 @@ int drm_bo_move_ttm(drm_device_t *dev,
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
+
+/**
+ * drm_mem_reg_ioremap - return a kernel virtual address for the PCI memory
+ * backing a memory region.
+ *
+ * \param dev The DRM device.
+ * \param mem The memory region.
+ * \param virtual On success, set to the kernel virtual address of the
+ * mapping, or NULL if the region is not accessible through PCI space.
+ * \return Failure indication.
+ *
+ * Returns -EINVAL if the memory region is currently not mappable.
+ * Returns -ENOMEM if the ioremap operation failed.
+ * Otherwise returns zero.
+ *
+ * Call with the buffer object mutex held.
+ */
+int drm_mem_reg_ioremap(drm_device_t *dev, drm_bo_mem_reg_t *mem,
+			void **virtual)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+	unsigned long bus_offset;
+	unsigned long bus_size;
+	unsigned long bus_base;
+	int ret;
+	void *addr;
+
+	*virtual = NULL;
+	ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
+	if (ret || bus_size == 0)
+		return ret;
+
+	if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+		addr = (void *)(((u8 *)man->io_addr) + bus_offset);
+	} else {
+		addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+		if (!addr)
+			return -ENOMEM;
+	}
+	*virtual = addr;
+	return 0;
+}
+
+/**
+ * drm_mem_reg_iounmap - unmap a mapping obtained using drm_mem_reg_ioremap.
+ *
+ * \param dev The DRM device.
+ * \param mem The memory region that was mapped.
+ * \param virtual The virtual address returned by drm_mem_reg_ioremap.
+ *
+ * Call with the buffer object mutex held.
+ */
+void drm_mem_reg_iounmap(drm_device_t *dev, drm_bo_mem_reg_t *mem,
+			 void *virtual)
+{
+	drm_buffer_manager_t *bm = &dev->bm;
+	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+
+	if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
+		iounmap(virtual);
+}
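+
+/*
+ * Usage sketch (illustrative only, not part of this patch): the pair above
+ * is intended to bracket CPU access to a memory region, e.g.:
+ *
+ *	void *virtual;
+ *	int ret = drm_mem_reg_ioremap(dev, mem, &virtual);
+ *
+ *	if (ret)
+ *		return ret;
+ *	if (virtual != NULL) {
+ *		... access the region through 'virtual' ...
+ *	}
+ *	drm_mem_reg_iounmap(dev, mem, virtual);
+ *
+ * Note that a NULL 'virtual' after a successful call simply means the
+ * region is not visible through PCI space; the iounmap call is still safe.
+ */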
+
+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+	uint32_t *dstP =
+	    (uint32_t *)((unsigned long)dst + (page << PAGE_SHIFT));
+	uint32_t *srcP =
+	    (uint32_t *)((unsigned long)src + (page << PAGE_SHIFT));
+	int i;
+
+	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+		iowrite32(ioread32(srcP++), dstP++);
+	return 0;
+}
+
+static int drm_copy_io_ttm_page(drm_ttm_t *ttm, void *src, unsigned long page)
+{
+	struct page *d = drm_ttm_get_page(ttm, page);
+	void *dst;
+
+	if (!d)
+		return -ENOMEM;
+
+	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+	dst = kmap(d);
+	if (!dst)
+		return -ENOMEM;
+
+	memcpy_fromio(dst, src, PAGE_SIZE);
+	kunmap(d);	/* kunmap() takes the struct page, not the address. */
+	return 0;
+}
+
+static int drm_copy_ttm_io_page(drm_ttm_t *ttm, void *dst, unsigned long page)
+{
+	struct page *s = drm_ttm_get_page(ttm, page);
+	void *src;
+
+	if (!s)
+		return -ENOMEM;
+
+	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+	src = kmap(s);
+	if (!src)
+		return -ENOMEM;
+
+	memcpy_toio(dst, src, PAGE_SIZE);
+	kunmap(s);	/* kunmap() takes the struct page, not the address. */
+	return 0;
+}
+
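+/**
+ * drm_bo_move_memcpy - move a buffer by copying its contents with the CPU.
+ *
+ * \param dev The DRM device.
+ * \param ttm The ttm backing the buffer; consulted when one side of the
+ * move is not visible through PCI space.
+ * \param evict Unused by the memcpy path.
+ * \param no_wait Unused by the memcpy path.
+ * \param old_mem The region to move from; on success it is updated to
+ * describe the new placement.
+ * \param new_mem The region to move to.
+ * \return Failure indication.
+ *
+ * Call with the buffer object mutex held.
+ */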
+int drm_bo_move_memcpy(drm_device_t *dev,
+		       drm_ttm_t *ttm,
+		       int evict,
+		       int no_wait,
+		       drm_bo_mem_reg_t *old_mem,
+		       drm_bo_mem_reg_t *new_mem)
+{
+	void *old_iomap;
+	void *new_iomap;
+	int ret;
+	uint32_t save_flags = old_mem->flags;
+	uint32_t save_mask = old_mem->mask;
+	drm_bo_mem_reg_t old_copy = *old_mem;
+	unsigned long i;
+	unsigned long page;
+	unsigned long add = 0;
+	int dir;
+
+	ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
+	if (ret)
+		return ret;
+	ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
+	if (ret)
+		goto out;
+
+	if (old_iomap == NULL && new_iomap == NULL)
+		goto out2;
+
+	add = 0;
+	dir = 1;
+
+	/*
+	 * If both regions are in the same memory type and may overlap,
+	 * copy the pages backwards so that no source page is overwritten
+	 * before it has been read.
+	 */
+	if ((old_mem->mem_type == new_mem->mem_type) &&
+	    (new_mem->mm_node->start <
+	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+		dir = -1;
+		add = new_mem->num_pages - 1;
+	}
+
+	for (i = 0; i < new_mem->num_pages; ++i) {
+		page = i * dir + add;
+		if (old_iomap == NULL)
+			ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
+		else if (new_iomap == NULL)
+			ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
+		else
+			ret = drm_copy_io_page(new_iomap, old_iomap, page);
+		if (ret)
+			goto out1;
+	}
+
+out2:
+	*old_mem = *new_mem;
+	new_mem->mm_node = NULL;
+	old_mem->mask = save_mask;
+	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+out1:
+	drm_mem_reg_iounmap(dev, new_mem, new_iomap);
+out:
+	/*
+	 * Unmap against the saved copy of the old region: on the success
+	 * path *old_mem already describes the new placement, and the unmap
+	 * decision must use the memory type old_iomap was mapped from.
+	 */
+	drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
+	return ret;
+}