Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/drmP.h        76
-rw-r--r--  linux-core/drm_drv.c      3
-rw-r--r--  linux-core/drm_fence.c  587
-rw-r--r--  linux-core/drm_stub.c     1
4 files changed, 666 insertions, 1 deletion
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 81ca6aec..4be49b56 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -155,6 +155,7 @@
#define DRM_MEM_MM 22
#define DRM_MEM_HASHTAB 23
#define DRM_MEM_OBJECTS 24
+#define DRM_MEM_FENCE 25
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
@@ -637,6 +638,8 @@ struct drm_driver {
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
+ struct drm_fence_driver *fence_driver;
+
int major;
int minor;
int patchlevel;
@@ -667,6 +670,36 @@ typedef struct drm_head {
} drm_head_t;
+typedef struct drm_fence_driver {
+ int no_types;
+ uint32_t wrap_diff;
+ uint32_t flush_diff;
+ uint32_t sequence_mask;
+ int lazy_capable;
+ int (*emit) (struct drm_device *dev, uint32_t *breadcrumb);
+ void (*poke_flush) (struct drm_device *dev);
+} drm_fence_driver_t;
+
+
+typedef struct drm_fence_manager {
+ int initialized;
+ rwlock_t lock;
+
+ /*
+ * The list below should be maintained in sequence order and
+ * access is protected by the above rwlock.
+ */
+
+ struct list_head ring;
+ struct list_head *fence_types[32];
+ volatile uint32_t pending_flush;
+ wait_queue_head_t fence_queue;
+ int pending_exe_flush;
+ uint32_t last_exe_flush;
+ uint32_t exe_flush_sequence;
+} drm_fence_manager_t;
+
+
/**
* DRM device structure. This structure represents a complete card that
* may contain multiple heads.
@@ -798,8 +831,20 @@ typedef struct drm_device {
drm_local_map_t *agp_buffer_map;
unsigned int agp_buffer_token;
drm_head_t primary; /**< primary screen head */
+
+ drm_fence_manager_t fm;
+
} drm_device_t;
+#if __OS_HAS_AGP
+typedef struct drm_agp_ttm_priv {
+ DRM_AGP_MEM *mem;
+ struct agp_bridge_data *bridge;
+ unsigned mem_type;
+ int populated;
+} drm_agp_ttm_priv;
+#endif
+
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
@@ -894,6 +939,24 @@ typedef struct drm_ref_object {
drm_ref_t unref_action;
} drm_ref_object_t;
+typedef struct drm_fence_object {
+ drm_user_object_t base;
+ atomic_t usage;
+
+ /*
+ * The fields below are protected by the fence manager rwlock.
+ */
+
+ struct list_head ring;
+ volatile uint32_t type;
+ volatile uint32_t signaled;
+ uint32_t sequence;
+ volatile uint32_t flush_mask;
+ volatile uint32_t submitted_flush;
+} drm_fence_object_t;
+
+
+
/******************************************************************/
@@ -924,7 +987,6 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
-extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
/* Memory management support (drm_memory.h) */
#include "drm_memory.h"
@@ -1205,6 +1267,18 @@ extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object
extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
+
+/*
+ * fence objects (drm_fence.c)
+ */
+
+extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
+extern void drm_fence_manager_init(drm_device_t *dev);
+extern void drm_fence_manager_takedown(drm_device_t *dev);
+extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
+extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
+
+
/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_map *map,
struct drm_device *dev)
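
(For illustration, not part of this diff: the drm_fence_driver table above is the
only per-driver hook surface. A driver describes its breadcrumb hardware and
supplies the emit and poke_flush callbacks. A minimal sketch for a hypothetical
"foo" driver with a 24-bit breadcrumb counter, where all the foo_* names and the
counter width are assumptions:

static drm_fence_driver_t foo_fence_driver = {
	.no_types = 1,                 /* only the EXE fence type */
	.wrap_diff = (1 << 23),        /* half the 24-bit sequence space */
	.flush_diff = (1 << 22),       /* start flushing well before wrap */
	.sequence_mask = 0x00ffffff,   /* hardware breadcrumb is 24 bits */
	.lazy_capable = 1,             /* a breadcrumb IRQ allows lazy waits */
	.emit = foo_fence_emit,        /* write a breadcrumb, return its sequence */
	.poke_flush = foo_poke_flush,  /* kick hw so pending flushes complete */
};

The table is then assigned to the new fence_driver member of the driver's
drm_driver structure.)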
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index ccfd1855..e6ae690a 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -119,6 +119,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
};
#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls )
@@ -347,6 +348,8 @@ static void __exit drm_cleanup(drm_device_t * dev)
drm_lastclose(dev);
+ drm_fence_manager_takedown(dev);
+
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
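
(For illustration, not part of this diff: userspace reaches the whole fence
machinery through the single DRM_IOCTL_FENCE entry added above, multiplexed on
arg.op. A minimal userspace sketch, assuming drm_fence_arg_t and the op and flag
constants are exported through the shared drm.h header, which this commit does
not show:

#include <sys/ioctl.h>
#include "drm.h"

static int create_emitted_fence(int fd, unsigned int *handle)
{
	drm_fence_arg_t arg = { 0 };

	arg.op = drm_fence_create;
	arg.type = DRM_FENCE_EXE;         /* assumed userspace-visible constant */
	arg.flags = DRM_FENCE_FLAG_EMIT;  /* emit a breadcrumb right away */
	if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
		return -1;
	*handle = arg.handle;  /* user token for later flush/wait/unreference */
	return 0;
}

Note that DRM_FENCE_FLAG_EMIT is gated on LOCK_TEST_WITH_RETURN in the kernel,
so the caller must hold the hardware lock.)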
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
new file mode 100644
index 00000000..fc27c576
--- /dev/null
+++ b/linux-core/drm_fence.c
@@ -0,0 +1,587 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+static void drm_fm_update_pointers(drm_fence_manager_t * fm,
+ struct list_head *list, int no_types,
+ uint32_t type)
+{
+ int i;
+ for (i = 0; i < no_types; ++i) {
+ if (type & (1 << i)) {
+ fm->fence_types[i] = list;
+ }
+ }
+}
+
+/*
+ * Typically called by the IRQ handler.
+ */
+
+void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
+{
+ int i;
+ int wake = 0;
+ int largest = 0;
+ uint32_t diff;
+ uint32_t relevant;
+ int index = 0;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ struct list_head *list;
+ struct list_head *fence_list;
+ drm_fence_object_t *fence;
+ int found = 0;
+
+ for (i = 0; i < driver->no_types; ++i) {
+ if (!(type & (1 << i)))
+ continue;
+
+ list = fm->fence_types[i];
+ fence_list = list->next;
+
+ if (fence_list == &fm->ring)
+ continue;
+
+ fence = list_entry(fence_list, drm_fence_object_t, ring);
+
+ diff = (sequence - fence->sequence) & driver->sequence_mask;
+
+ if (diff < driver->wrap_diff) {
+ if (diff >= largest) {
+ largest = diff;
+ index = i;
+ found = 1;
+ }
+ }
+ }
+
+ if (!found)
+ return;
+
+ /*
+ * Start with the fence object with the lowest sequence number
+ * affected by the type mask of this call. Update the signaled
+ * fields and check whether we need to wake sleeping processes.
+ */
+
+ list = fm->fence_types[index]->next;
+ do {
+ if (list == &fm->ring) {
+ drm_fm_update_pointers(fm, list->prev,
+ driver->no_types, type);
+ break;
+ }
+ fence = list_entry(list, drm_fence_object_t, ring);
+ diff = (sequence - fence->sequence) & driver->sequence_mask;
+ if (diff >= driver->wrap_diff) {
+ drm_fm_update_pointers(fm, fence->ring.prev,
+ driver->no_types, type);
+ break;
+ }
+ relevant = type & fence->type;
+ if ((fence->signaled | relevant) != fence->signaled) {
+ fence->signaled |= relevant;
+ fence->submitted_flush |= relevant;
+ wake = 1;
+ }
+
+ relevant = fence->flush_mask &
+ ~(fence->signaled | fence->submitted_flush);
+ if (relevant) {
+ fm->pending_flush |= relevant;
+ fence->submitted_flush = fence->flush_mask;
+ }
+
+ list = list->next;
+
+ /*
+ * Remove a completely signaled fence from the
+ * fence manager ring.
+ */
+
+ if (!(fence->type & ~fence->signaled)) {
+ fence_list = &fence->ring;
+ for (i = 0; i < driver->no_types; ++i) {
+ if (fm->fence_types[i] == fence_list)
+ fm->fence_types[i] = fence_list->prev;
+ }
+ list_del_init(fence_list);
+ }
+
+ } while (1);
+
+ /*
+ * Wake sleeping processes.
+ */
+
+ if (wake) {
+ DRM_WAKEUP(&fm->fence_queue);
+ }
+}
+
+EXPORT_SYMBOL(drm_fence_handler);
+
+static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ unsigned long flags;
+ int i;
+
+ write_lock_irqsave(&fm->lock, flags);
+ for (i = 0; i < driver->no_types; ++i) {
+ if (fm->fence_types[i] == ring)
+ fm->fence_types[i] = ring->prev;
+ }
+ list_del_init(ring);
+ write_unlock_irqrestore(&fm->lock, flags);
+}
+
+void drm_fence_usage_deref_locked(drm_device_t * dev,
+ drm_fence_object_t * fence)
+{
+ if (atomic_dec_and_test(&fence->usage)) {
+ drm_fence_unring(dev, &fence->ring);
+ drm_free(fence, sizeof(*fence), DRM_MEM_FENCE);
+ }
+}
+
+void drm_fence_usage_deref_unlocked(drm_device_t * dev,
+ drm_fence_object_t * fence)
+{
+ if (atomic_dec_and_test(&fence->usage)) {
+ mutex_lock(&dev->struct_mutex);
+ if (atomic_read(&fence->usage) == 0) {
+ drm_fence_unring(dev, &fence->ring);
+ drm_free(fence, sizeof(*fence), DRM_MEM_FENCE);
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+static void drm_fence_object_destroy(drm_file_t * priv,
+ drm_user_object_t * base)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_fence_object_t *fence =
+ drm_user_object_entry(base, drm_fence_object_t, base);
+
+ drm_fence_usage_deref_locked(dev, fence);
+}
+
+static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
+ uint32_t mask, int poke_flush)
+{
+ unsigned long flags;
+ int signaled;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+
+ if (poke_flush)
+ driver->poke_flush(dev);
+ read_lock_irqsave(&fm->lock, flags);
+ signaled =
+ (fence->type & mask & fence->signaled) == (fence->type & mask);
+ read_unlock_irqrestore(&fm->lock, flags);
+
+ return signaled;
+}
+
+static void drm_fence_flush_exe(drm_fence_manager_t * fm,
+ drm_fence_driver_t * driver, uint32_t sequence)
+{
+ uint32_t diff;
+
+ if (!fm->pending_exe_flush) {
+ struct list_head *list;
+
+ /*
+ * last_exe_flush is invalid. Find the oldest sequence.
+ */
+
+ list = fm->fence_types[_DRM_FENCE_TYPE_EXE];
+ if (list->next == &fm->ring) {
+ return;
+ } else {
+ drm_fence_object_t *fence =
+ list_entry(list->next, drm_fence_object_t, ring);
+ fm->last_exe_flush = (fence->sequence - 1) &
+ driver->sequence_mask;
+ }
+ diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
+ if (diff >= driver->wrap_diff)
+ return;
+ fm->exe_flush_sequence = sequence;
+ fm->pending_exe_flush = 1;
+ } else {
+ diff =
+ (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
+ if (diff < driver->wrap_diff) {
+ fm->exe_flush_sequence = sequence;
+ }
+ }
+}
+
+/*
+ * Make sure old fence objects are signaled before their fence sequences are
+ * wrapped around and reused.
+ */
+
+static int drm_fence_object_flush(drm_device_t * dev,
+ drm_fence_object_t * fence, uint32_t type)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ unsigned long flags;
+
+ if (type & ~fence->type) {
+ DRM_ERROR("Flush trying to extend fence type\n");
+ return -EINVAL;
+ }
+
+ write_lock_irqsave(&fm->lock, flags);
+ fence->flush_mask |= type;
+ if (fence->submitted_flush == fence->signaled) {
+ if ((fence->type & DRM_FENCE_EXE) &&
+ !(fence->submitted_flush & DRM_FENCE_EXE)) {
+ drm_fence_flush_exe(fm, driver, fence->sequence);
+ fence->submitted_flush |= DRM_FENCE_EXE;
+ } else {
+ fm->pending_flush |= (fence->flush_mask &
+ ~fence->submitted_flush);
+ fence->submitted_flush = fence->flush_mask;
+ }
+ }
+ write_unlock_irqrestore(&fm->lock, flags);
+ driver->poke_flush(dev);
+ return 0;
+}
+
+void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ uint32_t old_sequence;
+ unsigned long flags;
+ drm_fence_object_t *fence;
+ uint32_t diff;
+
+ mutex_lock(&dev->struct_mutex);
+ read_lock_irqsave(&fm->lock, flags);
+ if (fm->ring.next == &fm->ring) {
+ read_unlock_irqrestore(&fm->lock, flags);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+ old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
+ fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
+ atomic_inc(&fence->usage);
+ mutex_unlock(&dev->struct_mutex);
+ diff = (old_sequence - fence->sequence) & driver->sequence_mask;
+ read_unlock_irqrestore(&fm->lock, flags);
+ if (diff < driver->wrap_diff) {
+ drm_fence_object_flush(dev, fence, fence->type);
+ }
+ drm_fence_usage_deref_unlocked(dev, fence);
+}
+
+EXPORT_SYMBOL(drm_fence_flush_old);
+
+static int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
+ int lazy, int ignore_signals, uint32_t mask)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ int ret = 0;
+ unsigned long _end;
+
+ if (mask & ~fence->type) {
+ DRM_ERROR("Wait trying to extend fence type\n");
+ return -EINVAL;
+ }
+
+ if (fence_signaled(dev, fence, mask, 0))
+ return 0;
+
+ _end = jiffies + 3 * DRM_HZ;
+
+ drm_fence_object_flush(dev, fence, mask);
+ if (lazy && driver->lazy_capable) {
+ do {
+ DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
+ fence_signaled(dev, fence, mask, 1));
+ if (time_after_eq(jiffies, _end))
+ break;
+ } while (ret == -EINTR && ignore_signals);
+
+ if (time_after_eq(jiffies, _end) && (ret != 0))
+ ret = -EBUSY;
+ return ret;
+
+ } else {
+ int signaled;
+ do {
+ signaled = fence_signaled(dev, fence, mask, 1);
+ } while (!signaled && !time_after_eq(jiffies, _end));
+ if (!signaled)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
+ uint32_t type)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ unsigned long flags;
+ uint32_t sequence;
+ int ret;
+
+ drm_fence_unring(dev, &fence->ring);
+ ret = driver->emit(dev, &sequence);
+ if (ret)
+ return ret;
+
+ write_lock_irqsave(&fm->lock, flags);
+ fence->type = type;
+ fence->flush_mask = 0x00;
+ fence->submitted_flush = 0x00;
+ fence->signaled = 0x00;
+ fence->sequence = sequence;
+ list_add_tail(&fence->ring, &fm->ring);
+ write_unlock_irqrestore(&fm->lock, flags);
+ return 0;
+}
+
+int drm_fence_object_init(drm_device_t * dev, uint32_t type, int emit,
+ drm_fence_object_t * fence)
+{
+ int ret = 0;
+ unsigned long flags;
+ drm_fence_manager_t *fm = &dev->fm;
+
+ mutex_lock(&dev->struct_mutex);
+ atomic_set(&fence->usage, 1);
+ mutex_unlock(&dev->struct_mutex);
+
+ write_lock_irqsave(&fm->lock, flags);
+ INIT_LIST_HEAD(&fence->ring);
+ fence->type = type;
+ fence->flush_mask = 0;
+ fence->submitted_flush = 0;
+ fence->signaled = 0;
+ fence->sequence = 0;
+ write_unlock_irqrestore(&fm->lock, flags);
+ if (emit) {
+ ret = drm_fence_object_emit(dev, fence, type);
+ }
+ return ret;
+}
+
+EXPORT_SYMBOL(drm_fence_object_init);
+
+static int drm_fence_object_create(drm_file_t * priv, uint32_t type,
+ int emit, int shareable,
+ uint32_t * user_handle,
+ drm_fence_object_t ** c_fence)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+ drm_fence_object_t *fence;
+
+ fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
+ if (!fence)
+ return -ENOMEM;
+ ret = drm_fence_object_init(dev, type, emit, fence);
+ if (ret) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ return ret;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_add_user_object(priv, &fence->base, shareable);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ *c_fence = NULL;
+ *user_handle = 0;
+ return ret;
+ }
+ fence->base.type = drm_fence_type;
+ fence->base.remove = &drm_fence_object_destroy;
+ *user_handle = fence->base.hash.key;
+ *c_fence = fence;
+
+ return 0;
+}
+
+void drm_fence_manager_init(drm_device_t * dev)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *fed = dev->driver->fence_driver;
+ int i;
+
+ fm->lock = RW_LOCK_UNLOCKED;
+ INIT_LIST_HEAD(&fm->ring);
+ fm->pending_flush = 0;
+ DRM_INIT_WAITQUEUE(&fm->fence_queue);
+ fm->initialized = 0;
+ if (fed) {
+ fm->initialized = 1;
+ for (i = 0; i < fed->no_types; ++i) {
+ fm->fence_types[i] = &fm->ring;
+ }
+ }
+}
+
+void drm_fence_manager_takedown(drm_device_t * dev)
+{
+}
+
+drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_user_object_t *uo;
+ drm_fence_object_t *fence;
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, handle);
+ if (!uo || (uo->type != drm_fence_type)) {
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+ }
+ fence = drm_user_object_entry(uo, drm_fence_object_t, base);
+ atomic_inc(&fence->usage);
+ mutex_unlock(&dev->struct_mutex);
+ return fence;
+}
+
+int drm_fence_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_fence_object_t *fence;
+ drm_user_object_t *uo;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+ switch (arg.op) {
+ case drm_fence_create:{
+ int emit = arg.flags & DRM_FENCE_FLAG_EMIT;
+ if (emit)
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ ret =
+ drm_fence_object_create(priv, arg.type,
+ emit,
+ arg.flags &
+ DRM_FENCE_FLAG_SHAREABLE,
+ &arg.handle,
+ &fence);
+ if (ret)
+ return ret;
+ mutex_lock(&dev->struct_mutex);
+ atomic_inc(&fence->usage);
+ mutex_unlock(&dev->struct_mutex);
+ break;
+ }
+ case drm_fence_destroy:
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, arg.handle);
+ if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+ ret = drm_remove_user_object(priv, uo);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ case drm_fence_reference:
+ ret =
+ drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
+ if (ret)
+ return ret;
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ break;
+ case drm_fence_unreference:
+ ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
+ return ret;
+ case drm_fence_signaled:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ break;
+ case drm_fence_flush:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_flush(dev, fence, arg.type);
+ break;
+ case drm_fence_wait:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret =
+ drm_fence_object_wait(dev, fence,
+ arg.flags &
+ DRM_FENCE_FLAG_WAIT_LAZY,
+ arg.flags &
+ DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS,
+ arg.type);
+ break;
+ case drm_fence_emit:
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_emit(dev, fence, arg.type);
+ break;
+ default:
+ return -EINVAL;
+ }
+ read_lock_irqsave(&fm->lock, flags);
+ arg.type = fence->type;
+ arg.signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(dev, fence);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return ret;
+}
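
(For illustration, not part of this diff: drm_fence_handler() is the
driver-facing retirement hook and, as the code above shows, takes no lock of its
own, so the caller, typically the IRQ handler, must hold the fence manager
rwlock for writing. A hypothetical 2.6.17-era IRQ path, where
foo_read_breadcrumb() is an assumed hardware register read:

static irqreturn_t foo_irq_handler(int irq, void *arg, struct pt_regs *regs)
{
	drm_device_t *dev = (drm_device_t *) arg;
	drm_fence_manager_t *fm = &dev->fm;
	uint32_t sequence = foo_read_breadcrumb(dev);

	/* drm_fence_handler() walks the ring unlocked; lock it here. */
	write_lock(&fm->lock);
	drm_fence_handler(dev, sequence, DRM_FENCE_EXE);
	write_unlock(&fm->lock);
	return IRQ_HANDLED;
}

Everything up to and including `sequence` is then marked signaled and any
waiters on fm->fence_queue are woken.)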
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 9059f42c..6182141a 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -133,6 +133,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
+ drm_fence_manager_init(dev);
return 0;
error_out_unreg:
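
(For illustration, not part of this diff: drm_fence_manager_init() now runs
unconditionally from drm_fill_in_dev(); fm->initialized stays zero for drivers
without a fence_driver, which is exactly the case drm_fence_ioctl() rejects. On
the emission side, a driver's emit hook pairs naturally with
drm_fence_flush_old(), so that fences more than flush_diff behind the current
sequence are flushed and signaled before the counter wraps and sequences are
reused. A sketch, with foo_emit_breadcrumb() assumed to write a breadcrumb into
the ring and return its sequence number:

static int foo_fence_emit(drm_device_t * dev, uint32_t * breadcrumb)
{
	uint32_t sequence = foo_emit_breadcrumb(dev);

	*breadcrumb = sequence;

	/* Flush fences old enough to risk sequence wrap-around. */
	drm_fence_flush_old(dev, sequence);
	return 0;
}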