author    Eric Anholt <eric@anholt.net>    2008-05-01 15:22:21 -0700
committer Eric Anholt <eric@anholt.net>    2008-05-01 15:22:21 -0700
commit    ccd1bae0f676490a88240c62f02e072d2cf3b030 (patch)
tree      d74b3bc831796e174dd8059ad7bad056be2d96a5
parent    5af87acbc2025b9f72d51b30f176e9c3909695ac (diff)
checkpoint: relocations support.
-rw-r--r--  linux-core/drm_agpsupport.c    8
-rw-r--r--  linux-core/drm_gem.c           4
-rw-r--r--  linux-core/drm_memory.c        2
-rw-r--r--  linux-core/drm_memrange.c      3
-rw-r--r--  linux-core/i915_gem.c        104
-rw-r--r--  shared-core/i915_drm.h         2
6 files changed, 98 insertions, 25 deletions
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index b37d6d9d..15400386 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -493,9 +493,9 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
*/
DRM_AGP_MEM *
drm_agp_bind_pages(struct drm_device *dev,
- struct page **pages,
- unsigned long num_pages,
- uint32_t gtt_offset)
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset)
{
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
@@ -531,7 +531,7 @@ drm_agp_bind_pages(struct drm_device *dev,
return mem;
}
-
+EXPORT_SYMBOL(drm_agp_bind_pages);
/*
* AGP ttm backend interface.
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
index def526f0..09c0fe85 100644
--- a/linux-core/drm_gem.c
+++ b/linux-core/drm_gem.c
@@ -148,7 +148,7 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
return obj;
}
-
+EXPORT_SYMBOL(drm_gem_object_lookup);
/**
* Allocates a new mm object and returns a handle to it.
@@ -354,6 +354,7 @@ drm_gem_object_reference(struct drm_device *dev, struct drm_gem_object *obj)
obj->refcount++;
spin_unlock(&obj->lock);
}
+EXPORT_SYMBOL(drm_gem_object_reference);
void
drm_gem_object_unreference(struct drm_device *dev, struct drm_gem_object *obj)
@@ -372,3 +373,4 @@ drm_gem_object_unreference(struct drm_device *dev, struct drm_gem_object *obj)
kfree(obj);
}
}
+EXPORT_SYMBOL(drm_gem_object_unreference);
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 75f5b521..4b494f9c 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -310,6 +310,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
+EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -322,6 +323,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
+EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP*/
static void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/linux-core/drm_memrange.c b/linux-core/drm_memrange.c
index e1d2233b..663943ab 100644
--- a/linux-core/drm_memrange.c
+++ b/linux-core/drm_memrange.c
@@ -167,6 +167,7 @@ struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * pare
return child;
}
+EXPORT_SYMBOL(drm_memrange_get_block);
/*
* Put a block. Merge with the previous and / or next block if they are free.
@@ -257,6 +258,7 @@ struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * m
return best;
}
+EXPORT_SYMBOL(drm_memrange_search_free);
int drm_memrange_clean(struct drm_memrange * mm)
{
@@ -297,6 +299,7 @@ int drm_memrange_for_each(struct drm_memrange *mm,
return 0;
}
+EXPORT_SYMBOL(drm_memrange_for_each);
EXPORT_SYMBOL(drm_memrange_init);
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index bd030a88..3e4403c7 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -59,14 +59,10 @@ i915_gem_object_free_page_list(struct drm_device *dev,
if (obj_priv->page_list == NULL)
return;
- /* Count how many we had successfully allocated, since release_pages()
- * doesn't like NULLs.
- */
for (i = 0; i < obj->size / PAGE_SIZE; i++) {
if (obj_priv->page_list[i] == NULL)
- break;
+ put_page(obj_priv->page_list[i]);
}
- release_pages(obj_priv->page_list, i, 0);
drm_free(obj_priv->page_list,
page_count * sizeof(struct page *),
@@ -149,11 +145,9 @@ i915_gem_reloc_and_validate_object(struct drm_device *dev,
struct drm_i915_gem_validate_entry *entry,
struct drm_gem_object *obj)
{
- struct drm_i915_gem_reloc *relocs;
+ struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- /* Walk the list of relocations and perform them if necessary. */
- /* XXX */
+ int i;
/* Choose the GTT offset for our buffer and put it there. */
if (obj_priv->gtt_space == NULL) {
@@ -162,6 +156,64 @@ i915_gem_reloc_and_validate_object(struct drm_device *dev,
return -ENOMEM;
}
+ /* Apply the relocations, using the GTT aperture to avoid cache
+ * flushing requirements.
+ */
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_obj_priv;
+ void *reloc_page;
+ uint32_t reloc_val, *reloc_entry;
+ int ret;
+
+ ret = copy_from_user(&reloc, entry->relocs + i, sizeof(reloc));
+ if (ret != 0)
+ return ret;
+
+ target_obj = drm_gem_object_lookup(dev, file_priv,
+ reloc.target_handle);
+ if (target_obj == NULL)
+ return -EINVAL;
+ target_obj_priv = target_obj->driver_private;
+
+ /* The target buffer should have appeared before us in the
+ * validate list, so it should have a GTT space bound by now.
+ */
+ if (target_obj_priv->gtt_space == NULL) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc.target_handle);
+ return -EINVAL;
+ }
+
+ if (reloc.offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds.\n");
+ return -EINVAL;
+ }
+ if (reloc.offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned.\n");
+ return -EINVAL;
+ }
+
+ /* Map the page containing the relocation we're going to
+ * perform.
+ */
+ reloc_page = ioremap(dev->agp->base +
+ (reloc.offset & ~(PAGE_SIZE - 1)),
+ PAGE_SIZE);
+ if (reloc_page == NULL)
+ return -ENOMEM;
+
+ reloc_entry = (uint32_t *)((char *)reloc_page +
+ (reloc.offset & (PAGE_SIZE - 1)));
+ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+
+ DRM_DEBUG("Applied relocation: %p@0x%08x = 0x%08x\n",
+ obj, reloc.offset, reloc_val);
+ *reloc_entry = reloc_val;
+
+ iounmap(reloc_page);
+ }
+
return 0;
}
@@ -178,32 +230,44 @@ evict_callback(struct drm_memrange_node *node, void *data)
return 0;
}
+static int
+i915_gem_sync_and_evict(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ RING_LOCALS;
+
+ BEGIN_LP_RING(2);
+ OUT_RING(CMD_MI_FLUSH | MI_READ_FLUSH | MI_EXE_FLUSH);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ ret = i915_quiescent(dev);
+ if (ret != 0)
+ return ret;
+
+ /* Evict everything so we have space for sure. */
+ drm_memrange_for_each(&dev_priv->mm.gtt_space, evict_callback, dev);
+
+ return 0;
+}
+
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_validate_entry *validate_list;
struct drm_gem_object **object_list;
int ret, i;
- RING_LOCALS;
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Big hammer: flush and idle the hardware so we can map things in/out.
*/
- BEGIN_LP_RING(2);
- OUT_RING(CMD_MI_FLUSH | MI_READ_FLUSH | MI_EXE_FLUSH);
- OUT_RING(0); /* noop */
- ADVANCE_LP_RING();
- ret = i915_quiescent(dev);
+ ret = i915_gem_sync_and_evict(dev);
if (ret != 0)
return ret;
- /* Evict everything so we have space for sure. */
- drm_memrange_for_each(&dev_priv->mm.gtt_space, evict_callback, dev);
-
/* Copy in the validate list from userland */
validate_list = drm_calloc(sizeof(*validate_list), args->buffer_count,
DRM_MEM_DRIVER);
@@ -249,6 +313,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
sizeof(*validate_list) * args->buffer_count);
/* Clean up and return */
+ ret = i915_gem_sync_and_evict(dev);
+
err:
if (object_list != NULL) {
for (i = 0; i < args->buffer_count; i++)
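[Editor's note] The relocation loop added above patches each pointer by mapping the GTT-aperture page that contains the dword and writing the target's GTT offset plus a delta. A condensed sketch of that idiom, for reference; the helper name and the aperture_base parameter are illustrative only (standing in for dev->agp->base in the patch), not part of this commit:

#include <linux/io.h>
#include <linux/mm.h>

/* Illustrative sketch: patch one 32-bit relocation through an
 * ioremap()ed window into the GTT aperture, avoiding CPU cache
 * flushing of the object's backing pages.
 */
static int patch_one_reloc(resource_size_t aperture_base,
			   uint32_t reloc_offset,       /* byte offset of the dword */
			   uint32_t target_gtt_offset,  /* where the target object sits */
			   uint32_t delta)
{
	void __iomem *page;
	uint32_t __iomem *slot;

	/* Map only the page containing the relocation. */
	page = ioremap(aperture_base + (reloc_offset & PAGE_MASK), PAGE_SIZE);
	if (page == NULL)
		return -ENOMEM;

	slot = (uint32_t __iomem *)((char __iomem *)page +
				    (reloc_offset & ~PAGE_MASK));
	writel(target_gtt_offset + delta, slot);

	iounmap(page);
	return 0;
}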
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 4c241fc5..52d1f31f 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -419,7 +419,7 @@ struct drm_i915_gem_relocation_entry {
* list to refer to the buffer, but handle lookup should be O(1) anyway,
* and prevents O(n) search in userland to find what that index is.
*/
- uint32_t target_buffer;
+ uint32_t target_handle;
/** Offset in the buffer the relocation entry will be written into */
uint32_t offset;
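[Editor's note] With the rename from target_buffer to target_handle, userland refers to the relocation target by its GEM handle rather than by an index into the validate list. A hypothetical userspace sketch of filling one entry (the header path, fill_reloc helper, and the target/dword_offset values are illustrative; the i915_gem.c hunk above rejects offsets that are unaligned or beyond the object's size minus 4):

#include <stdint.h>
#include <string.h>
#include "i915_drm.h"   /* shared-core/i915_drm.h, as installed by libdrm */

/* Illustrative only: describe one pointer inside a batch buffer. */
static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
		       uint32_t target,        /* GEM handle of the target object */
		       uint32_t dword_offset)  /* byte offset of the pointer, 4-byte aligned */
{
	memset(reloc, 0, sizeof(*reloc));
	reloc->target_handle = target;   /* a handle, not a validate-list index */
	reloc->offset = dword_offset;    /* where the kernel writes the patched value */
	reloc->delta = 0;                /* added to the target's GTT offset */
}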