author     Dave Airlie <airlied@redhat.com>    2007-10-05 12:11:43 +1000
committer  Dave Airlie <airlied@redhat.com>    2007-10-05 12:11:43 +1000
commit     19b7cc34443889fc73de2e10462e1c0f9fa38041 (patch)
tree       1cd73a2ef2aaed132cbc766e4ad4a8d2e3dde4e6 /linux-core/drm_bo.c
parent     d351601899e5814d809b8e86ab6f0d6e7676f585 (diff)
parent     495bbbaadc93c574eb98dd2ad64bdca4d91d4152 (diff)
Merge branch 'pre-superioctl-branch'
Diffstat (limited to 'linux-core/drm_bo.c')
-rw-r--r--   linux-core/drm_bo.c   220
1 files changed, 153 insertions, 67 deletions
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index a2f66dc6..4e735770 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -142,12 +142,8 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
switch (bo->type) {
case drm_bo_type_dc:
- bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
- if (!bo->ttm)
- ret = -ENOMEM;
- break;
case drm_bo_type_kernel:
- bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
+ bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
if (!bo->ttm)
ret = -ENOMEM;
break;
@@ -175,7 +171,8 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
int ret = 0;
- if (old_is_pci || new_is_pci)
+ if (old_is_pci || new_is_pci ||
+ ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
ret = drm_bo_vm_pre_move(bo, old_is_pci);
if (ret)
return ret;
@@ -190,9 +187,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
goto out_err;
if (mem->mem_type != DRM_BO_MEM_LOCAL) {
- ret = drm_bind_ttm(bo->ttm, new_man->flags &
- DRM_BO_FLAG_CACHED,
- mem->mm_node->start);
+ ret = drm_bind_ttm(bo->ttm, mem);
if (ret)
goto out_err;
}
@@ -242,7 +237,9 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
_DRM_BO_FLAG_EVICTED);
if (bo->mem.mm_node)
- bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+ bm->man[bo->mem.mem_type].gpu_offset;
+
return 0;
@@ -290,6 +287,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
}
return 0;
}
+EXPORT_SYMBOL(drm_bo_wait);
static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
{
@@ -417,7 +415,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
atomic_dec(&bm->count);
- BUG_ON(!list_empty(&bo->base.list));
+ // BUG_ON(!list_empty(&bo->base.list));
drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
return;
@@ -503,6 +501,7 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
drm_bo_destroy_locked(tmp_bo);
}
}
+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
static void drm_bo_base_deref_locked(struct drm_file * file_priv,
struct drm_user_object * uo)
@@ -531,37 +530,80 @@ void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
}
EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
+void drm_putback_buffer_objects(struct drm_device *dev)
+{
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct list_head *list = &bm->unfenced;
+ struct drm_buffer_object *entry, *next;
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry_safe(entry, next, list, lru) {
+ atomic_inc(&entry->usage);
+ mutex_unlock(&dev->struct_mutex);
+
+ mutex_lock(&entry->mutex);
+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+ mutex_lock(&dev->struct_mutex);
+
+ list_del_init(&entry->lru);
+ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+ DRM_WAKEUP(&entry->event_queue);
+
+ /*
+ * FIXME: Might want to put back on head of list
+ * instead of tail here.
+ */
+
+ drm_bo_add_to_lru(entry);
+ mutex_unlock(&entry->mutex);
+ drm_bo_usage_deref_locked(&entry);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_putback_buffer_objects);
+
+
/*
* Note. The caller has to register (if applicable)
* and deregister fence object usage.
*/
-int drm_fence_buffer_objects(struct drm_file * file_priv,
+int drm_fence_buffer_objects(struct drm_device *dev,
struct list_head *list,
- uint32_t fence_class, uint32_t fence_flags,
+ uint32_t fence_flags,
struct drm_fence_object * fence,
struct drm_fence_object ** used_fence)
{
- struct drm_device *dev = file_priv->head->dev;
struct drm_buffer_manager *bm = &dev->bm;
-
struct drm_buffer_object *entry;
uint32_t fence_type = 0;
+ uint32_t fence_class = ~0;
int count = 0;
int ret = 0;
struct list_head *l;
- LIST_HEAD(f_list);
mutex_lock(&dev->struct_mutex);
if (!list)
list = &bm->unfenced;
+ if (fence)
+ fence_class = fence->fence_class;
+
list_for_each_entry(entry, list, lru) {
BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
- fence_type |= entry->fence_type;
- if (entry->fence_class == fence_class)
- count++;
+ fence_type |= entry->new_fence_type;
+ if (fence_class == ~0)
+ fence_class = entry->new_fence_class;
+ else if (entry->new_fence_class != fence_class) {
+ DRM_ERROR("Unmatching fence classes on unfenced list: "
+ "%d and %d.\n",
+ fence_class,
+ entry->new_fence_class);
+ ret = -EINVAL;
+ goto out;
+ }
+ count++;
}
if (!count) {
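/*
 * Illustration only, not part of this patch: with the new interface a
 * driver's command-submission path can fence everything on the unfenced
 * list against a single fence and, on failure, use
 * drm_putback_buffer_objects() to move the buffers back onto their LRU
 * lists.  The function name below is an assumption for the sketch.
 */
static int foo_fence_submission(struct drm_device *dev)
{
	struct drm_fence_object *fence = NULL;
	int ret;

	/* A NULL list means "fence bm->unfenced"; a NULL fence asks the
	 * core to supply one, returned through *used_fence. */
	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
	if (ret) {
		/* Fencing failed: take the buffers off the unfenced list
		 * and put them back on their LRU lists. */
		drm_putback_buffer_objects(dev);
		return ret;
	}

	if (fence)
		drm_fence_usage_deref_unlocked(&fence);
	return 0;
}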
@@ -569,14 +611,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
goto out;
}
- /*
- * Transfer to a local list before we release the dev->struct_mutex;
- * This is so we don't get any new unfenced objects while fencing
- * the ones we already have..
- */
-
- list_splice_init(list, &f_list);
-
if (fence) {
if ((fence_type & fence->type) != fence_type ||
(fence->fence_class != fence_class)) {
@@ -596,8 +630,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
}
count = 0;
- l = f_list.next;
- while (l != &f_list) {
+ l = list->next;
+ while (l != list) {
prefetch(l->next);
entry = list_entry(l, struct drm_buffer_object, lru);
atomic_inc(&entry->usage);
@@ -611,6 +645,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
if (entry->fence)
drm_fence_usage_deref_locked(&entry->fence);
entry->fence = drm_fence_reference_locked(fence);
+ entry->fence_class = entry->new_fence_class;
+ entry->fence_type = entry->new_fence_type;
DRM_FLAG_MASKED(entry->priv_flags, 0,
_DRM_BO_FLAG_UNFENCED);
DRM_WAKEUP(&entry->event_queue);
@@ -618,7 +654,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
}
mutex_unlock(&entry->mutex);
drm_bo_usage_deref_locked(&entry);
- l = f_list.next;
+ l = list->next;
}
DRM_DEBUG("Fenced %d buffers\n", count);
out:
@@ -626,7 +662,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
*used_fence = fence;
return ret;
}
-
EXPORT_SYMBOL(drm_fence_buffer_objects);
/*
@@ -941,6 +976,7 @@ struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
atomic_inc(&bo->usage);
return bo;
}
+EXPORT_SYMBOL(drm_lookup_buffer_object);
/*
* Call bo->mutex locked.
@@ -1076,9 +1112,12 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
struct drm_bo_info_rep *rep)
{
+ if (!rep)
+ return;
+
rep->handle = bo->base.hash.key;
rep->flags = bo->mem.flags;
- rep->size = bo->mem.num_pages * PAGE_SIZE;
+ rep->size = bo->num_pages * PAGE_SIZE;
rep->offset = bo->offset;
rep->arg_handle = bo->map_list.user_token;
rep->mask = bo->mem.mask;
@@ -1257,7 +1296,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
if (ret)
return ret;
- mem.num_pages = bo->mem.num_pages;
+ mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.mask = new_mem_flags;
mem.page_alignment = bo->mem.page_alignment;
@@ -1305,7 +1344,7 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
return 0;
if ((flag_diff & DRM_BO_FLAG_CACHED) &&
- (!(mem->mask & DRM_BO_FLAG_CACHED) ||
+ (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
(mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
return 0;
}
@@ -1372,7 +1411,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
(unsigned long long) bo->mem.mask,
(unsigned long long) bo->mem.flags);
- ret = driver->fence_type(bo, &ftype);
+ ret = driver->fence_type(bo, &fence_class, &ftype);
if (ret) {
DRM_ERROR("Driver did not support given buffer permissions\n");
@@ -1401,13 +1440,15 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
return ret;
}
-
- bo->fence_class = fence_class;
- bo->fence_type = ftype;
+
+ bo->new_fence_class = fence_class;
+ bo->new_fence_type = ftype;
+
ret = drm_bo_wait_unmapped(bo, no_wait);
- if (ret)
+ if (ret) {
+ DRM_ERROR("Timed out waiting for buffer unmap.\n");
return ret;
-
+ }
if (bo->type == drm_bo_type_fake) {
ret = drm_bo_check_fake(dev, &bo->mem);
if (ret)
@@ -1462,23 +1503,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
return 0;
}
-static int drm_bo_handle_validate(struct drm_file *file_priv,
- uint32_t handle,
- uint32_t fence_class,
- uint64_t flags, uint64_t mask, uint32_t hint,
- struct drm_bo_info_rep *rep)
+int drm_bo_do_validate(struct drm_buffer_object *bo,
+ uint64_t flags, uint64_t mask, uint32_t hint,
+ uint32_t fence_class,
+ int no_wait,
+ struct drm_bo_info_rep *rep)
{
- struct drm_device *dev = file_priv->head->dev;
- struct drm_buffer_object *bo;
int ret;
- int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-
- mutex_lock(&dev->struct_mutex);
- bo = drm_lookup_buffer_object(file_priv, handle, 1);
- mutex_unlock(&dev->struct_mutex);
- if (!bo) {
- return -EINVAL;
- }
mutex_lock(&bo->mutex);
ret = drm_bo_wait_unfenced(bo, no_wait, 0);
@@ -1486,24 +1517,56 @@ static int drm_bo_handle_validate(struct drm_file *file_priv,
if (ret)
goto out;
+
DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
ret = drm_bo_new_mask(bo, flags, hint);
if (ret)
goto out;
- ret =
- drm_buffer_object_validate(bo, fence_class,
- !(hint & DRM_BO_HINT_DONT_FENCE),
- no_wait);
- drm_bo_fill_rep_arg(bo, rep);
-
- out:
+ ret = drm_buffer_object_validate(bo,
+ fence_class,
+ !(hint & DRM_BO_HINT_DONT_FENCE),
+ no_wait);
+out:
+ if (rep)
+ drm_bo_fill_rep_arg(bo, rep);
mutex_unlock(&bo->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bo_do_validate);
+
+
+int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
+ uint32_t fence_class,
+ uint64_t flags, uint64_t mask, uint32_t hint,
+ struct drm_bo_info_rep * rep,
+ struct drm_buffer_object **bo_rep)
+{
+ struct drm_device *dev = file_priv->head->dev;
+ struct drm_buffer_object *bo;
+ int ret;
+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+
+ mutex_lock(&dev->struct_mutex);
+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (!bo) {
+ return -EINVAL;
+ }
+
+ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
+ no_wait, rep);
+
+ if (!ret && bo_rep)
+ *bo_rep = bo;
+ else
+ drm_bo_usage_deref_unlocked(&bo);
- drm_bo_usage_deref_unlocked(&bo);
return ret;
}
+EXPORT_SYMBOL(drm_bo_handle_validate);
/**
* Fills out the generic buffer object ioctl reply with the information for
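/*
 * Illustration only, not part of this patch: validation is now split in
 * two.  drm_bo_handle_validate() does the handle lookup and then calls
 * drm_bo_do_validate(), so in-kernel callers that already hold a
 * drm_buffer_object reference can validate directly and may pass a NULL
 * reply struct.  The wrapper name and placement flags are assumptions.
 */
static int foo_validate_to_tt(struct drm_buffer_object *bo)
{
	/* Ask for TT placement; don't fence, don't block. */
	return drm_bo_do_validate(bo,
				  DRM_BO_FLAG_MEM_TT,		/* flags */
				  DRM_BO_MASK_MEM,		/* mask */
				  DRM_BO_HINT_DONT_FENCE |
				  DRM_BO_HINT_DONT_BLOCK,	/* hint */
				  0,				/* fence_class */
				  1,				/* no_wait */
				  NULL);			/* rep is optional */
}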
@@ -1609,8 +1672,9 @@ int drm_buffer_object_create(struct drm_device *dev,
#endif
bo->dev = dev;
bo->type = type;
+ bo->num_pages = num_pages;
bo->mem.mem_type = DRM_BO_MEM_LOCAL;
- bo->mem.num_pages = num_pages;
+ bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
if (bo->type == drm_bo_type_fake) {
@@ -1637,7 +1701,7 @@ int drm_buffer_object_create(struct drm_device *dev,
}
bo->fence_class = 0;
- ret = driver->fence_type(bo, &bo->fence_type);
+ ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
if (ret) {
DRM_ERROR("Driver did not support given buffer permissions\n");
goto out_err;
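/*
 * Illustration only, not part of this patch: the driver fence_type() hook
 * now reports a fence class in addition to the fence type.  A minimal
 * single-class hook might look like the sketch below; the driver name and
 * the extra read/write type bit are assumptions.
 */
#define FOO_FENCE_TYPE_RW 0x00000002	/* hypothetical driver-specific bit */

static int foo_fence_type(struct drm_buffer_object *bo,
			  uint32_t *fclass, uint32_t *type)
{
	*fclass = 0;	/* this driver only has one fence class */

	if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
		*type = DRM_FENCE_TYPE_EXE | FOO_FENCE_TYPE_RW;
	else
		*type = DRM_FENCE_TYPE_EXE;
	return 0;
}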
@@ -1739,7 +1803,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
req->bo_req.flags,
req->bo_req.mask,
req->bo_req.hint,
- &rep);
+ &rep, NULL);
break;
case drm_bo_fence:
ret = -EINVAL;
@@ -2089,9 +2153,30 @@ static void drm_bo_clean_unfenced(struct drm_device *dev)
struct drm_buffer_manager *bm = &dev->bm;
struct list_head *head, *list;
struct drm_buffer_object *entry;
+ struct drm_fence_object *fence;
head = &bm->unfenced;
+ if (list_empty(head))
+ return;
+
+ DRM_ERROR("Clean unfenced\n");
+
+ if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
+
+ /*
+ * Fixme: Should really wait here.
+ */
+ }
+
+ if (fence)
+ drm_fence_usage_deref_locked(&fence);
+
+ if (list_empty(head))
+ return;
+
+ DRM_ERROR("Really clean unfenced\n");
+
list = head->next;
while(list != head) {
prefetch(list->next);
@@ -2251,7 +2336,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
if (!man->has_type) {
DRM_ERROR("Trying to take down uninitialized "
- "memory manager type\n");
+ "memory manager type %u\n", mem_type);
return ret;
}
man->use_type = 0;
@@ -2273,6 +2358,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
return ret;
}
+EXPORT_SYMBOL(drm_bo_clean_mm);
/**
*Evict all buffers of a particular mem_type, but leave memory manager