-rw-r--r--  libdrm/xf86drm.c             | 127
-rw-r--r--  libdrm/xf86drm.h             |  22
-rw-r--r--  linux-core/Makefile.kernel   |   6
-rw-r--r--  linux-core/drmP.h            | 241
-rw-r--r--  linux-core/drm_agpsupport.c  | 165
-rw-r--r--  linux-core/drm_bufs.c        |   2
-rw-r--r--  linux-core/drm_compat.c      | 140
-rw-r--r--  linux-core/drm_compat.h      |  18
-rw-r--r--  linux-core/drm_drv.c         |   4
-rw-r--r--  linux-core/drm_fence.c       | 589
-rw-r--r--  linux-core/drm_fops.c        | 123
-rw-r--r--  linux-core/drm_hashtab.c     |  17
-rw-r--r--  linux-core/drm_hashtab.h     |   1
-rw-r--r--  linux-core/drm_lock.c        |  57
-rw-r--r--  linux-core/drm_object.c      | 289
-rw-r--r--  linux-core/drm_stub.c        |   7
-rw-r--r--  linux-core/drm_ttm.c         | 802
-rw-r--r--  linux-core/drm_ttm.h         | 167
-rw-r--r--  linux-core/drm_vm.c          | 291
-rw-r--r--  linux-core/i915_buffer.c     |  40
-rw-r--r--  linux-core/i915_drv.c        |  19
-rw-r--r--  linux-core/i915_fence.c      | 121
-rw-r--r--  shared-core/drm.h            |  32
-rw-r--r--  shared-core/i915_dma.c       |  10
-rw-r--r--  shared-core/i915_drv.h       |  34
-rw-r--r--  shared-core/i915_irq.c       |  10
26 files changed, 3254 insertions(+), 80 deletions(-)
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index c9f1b2db..a2a2e28c 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -2236,3 +2236,130 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
}
return 0;
}
+
+int drmFenceCreate(int fd, int shareable, int class, unsigned type,
+ int emit,
+ drmFence *fence)
+{
+ drm_fence_arg_t arg;
+
+ arg.type = type;
+ arg.class = class;
+ arg.flags = (shareable) ? DRM_FENCE_FLAG_SHAREABLE : 0;
+ arg.flags |= (emit) ? DRM_FENCE_FLAG_EMIT : 0;
+ arg.op = drm_fence_create;
+ if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ return -errno;
+ fence->handle = arg.handle;
+ fence->class = arg.class;
+ fence->type = arg.type;
+ fence->signaled = 0;
+ return 0;
+}
+
+int drmFenceDestroy(int fd, const drmFence *fence)
+{
+ drm_fence_arg_t arg;
+
+ arg.handle = fence->handle;
+ arg.op = drm_fence_destroy;
+ if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ return -errno;
+ return 0;
+}
+
+int drmFenceReference(int fd, unsigned handle, drmFence *fence)
+{
+ drm_fence_arg_t arg;
+
+ arg.handle = handle;
+ arg.op = drm_fence_reference;
+ if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ return -errno;
+ fence->handle = arg.handle;
+ fence->class = arg.class;
+ fence->type = arg.type;
+ fence->signaled = arg.signaled;
+ return 0;
+}
+
+int drmFenceUnreference(int fd, const drmFence *fence)
+{
+ drm_fence_arg_t arg;
+
+ arg.handle = fence->handle;
+ arg.op = drm_fence_unreference;
+ if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ return -errno;
+ return 0;
+}
+
+int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
+{
+ drm_fence_arg_t arg;
+
+ arg.handle = fence->handle;
+ arg.type = flush_type;
+ arg.op = drm_fence_flush;
+ if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ return -errno;
+ fence->class = arg.class;
+ fence->type = arg.type;
+ fence->signaled = arg.signaled;
+ return 0;
+}
+
+int drmFenceSignaled(int fd, drmFence *fence)
+{
+ drm_fence_arg_t arg;
+
+ arg.handle = fence->handle;
+ arg.op = drm_fence_signaled;
+ if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ return -errno;
+ fence->class = arg.class;
+ fence->type = arg.type;
+ fence->signaled = arg.signaled;
+ return 0;
+}
+
+int drmFenceEmit(int fd, drmFence *fence, unsigned emit_type)
+{
+ drm_fence_arg_t arg;
+
+ arg.handle = fence->handle;
+ arg.type = emit_type;
+ arg.op = drm_fence_emit;
+ if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ return -errno;
+ fence->class = arg.class;
+ fence->type = arg.type;
+ fence->signaled = arg.signaled;
+ return 0;
+}
+
+int drmFenceWait(int fd, drmFence *fence, unsigned flush_type,
+ int lazy, int ignore_signals)
+{
+ drm_fence_arg_t arg;
+ int ret;
+
+ arg.handle = fence->handle;
+ arg.type = flush_type;
+ arg.flags = (lazy) ? DRM_FENCE_FLAG_WAIT_LAZY : 0;
+ arg.flags |= (ignore_signals) ? DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS : 0;
+ arg.op = drm_fence_wait;
+ do {
+ ret = ioctl(fd, DRM_IOCTL_FENCE, &arg);
+ } while (ret != 0 && errno == EAGAIN);
+
+ if (ret)
+ return -errno;
+
+ fence->class = arg.class;
+ fence->type = arg.type;
+ fence->signaled = arg.signaled;
+ return 0;
+}
+
+
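
An aside on usage (editorial, not part of the patch): a minimal sketch of how a client could drive these new wrappers. The helper name is invented, DRM_FENCE_EXE is assumed to be the execution-fence type bit that the kernel code below tests and that the shared-core/drm.h changes in this commit (not shown in this excerpt) are expected to export, and error handling is abbreviated. Note that the emit path requires the caller to hold the hardware lock, since the ioctl does LOCK_TEST_WITH_RETURN for it.

/* Illustrative only -- not part of this patch.  Creates a fence, has the
 * kernel emit it behind the commands already submitted on this fd, waits
 * lazily for it to signal, then destroys it. */
static int example_fence_sync(int fd)
{
	drmFence fence;
	int ret;

	ret = drmFenceCreate(fd, 0 /* not shareable */, 0 /* class */,
			     DRM_FENCE_EXE, 1 /* emit now */, &fence);
	if (ret)
		return ret;

	ret = drmFenceWait(fd, &fence, DRM_FENCE_EXE,
			   1 /* lazy */, 0 /* honour signals */);

	drmFenceDestroy(fd, &fence);
	return ret;
}
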
diff --git a/libdrm/xf86drm.h b/libdrm/xf86drm.h
index 48a18f29..78730785 100644
--- a/libdrm/xf86drm.h
+++ b/libdrm/xf86drm.h
@@ -280,6 +280,12 @@ typedef struct _drmSetVersion {
int drm_dd_minor;
} drmSetVersion, *drmSetVersionPtr;
+typedef struct _drmFence{
+ unsigned handle;
+ int class;
+ unsigned type;
+ unsigned signaled;
+} drmFence;
#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
@@ -596,6 +602,22 @@ extern int drmScatterGatherFree(int fd, drm_handle_t handle);
extern int drmWaitVBlank(int fd, drmVBlankPtr vbl);
+/* Fencing */
+
+extern int drmFenceCreate(int fd, int shareable, int class,
+ unsigned type, int emit,
+ drmFence *fence);
+extern int drmFenceDestroy(int fd, const drmFence *fence);
+extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
+extern int drmFenceUnreference(int fd, const drmFence *fence);
+extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
+extern int drmFenceSignaled(int fd, drmFence *fence);
+extern int drmFenceWait(int fd, drmFence *fence, unsigned flush_type,
+ int lazy, int ignore_signals);
+extern int drmFenceEmit(int fd, drmFence *fence, unsigned emit_type);
+
+
+
/* Support routines */
extern int drmError(int err, const char *label);
extern void *drmMalloc(int size);
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 211e5b05..e571f29e 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -12,13 +12,15 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
- drm_hashtab.o drm_mm.o
+ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
+ drm_fence.o drm_ttm.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i830-objs := i830_drv.o i830_dma.o i830_irq.o
-i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
+i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
+ i915_buffer.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 6cbb810f..e42b5e55 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -154,6 +154,10 @@
#define DRM_MEM_CTXLIST 21
#define DRM_MEM_MM 22
#define DRM_MEM_HASHTAB 23
+#define DRM_MEM_OBJECTS 24
+#define DRM_MEM_FENCE 25
+#define DRM_MEM_TTM 26
+#define DRM_MEM_BUFOBJ 27
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
#define DRM_MAP_HASH_OFFSET 0x10000000
@@ -387,6 +391,19 @@ typedef struct drm_buf_entry {
drm_freelist_t freelist;
} drm_buf_entry_t;
+/*
+ * This should be small enough to allow the use of kmalloc for hash tables
+ * instead of vmalloc.
+ */
+
+#define DRM_FILE_HASH_ORDER 8
+typedef enum{
+ _DRM_REF_USE=0,
+ _DRM_REF_TYPE1,
+ _DRM_NO_REF_TYPES
+} drm_ref_t;
+
+
/** File private data */
typedef struct drm_file {
int authenticated;
@@ -401,6 +418,18 @@ typedef struct drm_file {
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
+
+ /*
+ * The user object hash table is global and resides in the
+ * drm_device structure. We protect the lists and hash tables with the
+ * device struct_mutex. A bit coarse-grained but probably the best
+ * option.
+ */
+
+ struct list_head refd_objects;
+ struct list_head user_objects;
+
+ drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES];
void *driver_priv;
} drm_file_t;
@@ -558,12 +587,25 @@ typedef struct drm_mm {
drm_mm_node_t root_node;
} drm_mm_t;
+#include "drm_ttm.h"
+
+/*
+ * buffer object driver
+ */
+
+typedef struct drm_bo_driver{
+ int cached_pages;
+ drm_ttm_backend_t *(*create_ttm_backend_entry)
+ (struct drm_device *dev, int cached);
+} drm_bo_driver_t;
+
/**
* DRM driver structure. This structure represent the common code for
* a family of cards. There will one drm_device for each card present
* in this family
*/
+
struct drm_device;
struct drm_driver {
int (*load) (struct drm_device *, unsigned long flags);
@@ -609,6 +651,9 @@ struct drm_driver {
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
+ struct drm_fence_driver *fence_driver;
+ struct drm_bo_driver *bo_driver;
+
int major;
int minor;
int patchlevel;
@@ -638,6 +683,38 @@ typedef struct drm_head {
struct class_device *dev_class;
} drm_head_t;
+
+typedef struct drm_fence_driver{
+ int no_types;
+ uint32_t wrap_diff;
+ uint32_t flush_diff;
+ uint32_t sequence_mask;
+ int lazy_capable;
+ int (*emit) (struct drm_device *dev, uint32_t *breadcrumb);
+ void (*poke_flush) (struct drm_device *dev);
+} drm_fence_driver_t;
+
+#define _DRM_FENCE_TYPE_EXE 0x00
+
+typedef struct drm_fence_manager{
+ int initialized;
+ rwlock_t lock;
+
+ /*
+ * The list below should be maintained in sequence order and
+ * access is protected by the above spinlock.
+ */
+
+ struct list_head ring;
+ struct list_head *fence_types[32];
+ volatile uint32_t pending_flush;
+ wait_queue_head_t fence_queue;
+ int pending_exe_flush;
+ uint32_t last_exe_flush;
+ uint32_t exe_flush_sequence;
+} drm_fence_manager_t;
+
+
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
@@ -685,6 +762,7 @@ typedef struct drm_device {
drm_map_list_t *maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
drm_open_hash_t map_hash; /**< User token hash table for maps */
+ drm_open_hash_t object_hash; /**< User token hash table for objects */
/** \name Context handle management */
/*@{ */
@@ -768,8 +846,21 @@ typedef struct drm_device {
drm_local_map_t *agp_buffer_map;
unsigned int agp_buffer_token;
drm_head_t primary; /**< primary screen head */
+
+ drm_fence_manager_t fm;
+
} drm_device_t;
+#if __OS_HAS_AGP
+typedef struct drm_agp_ttm_priv {
+ DRM_AGP_MEM *mem;
+ struct agp_bridge_data *bridge;
+ unsigned mem_type;
+ int populated;
+} drm_agp_ttm_priv;
+#endif
+
+
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
@@ -809,6 +900,82 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
#define drm_core_has_MTRR(dev) (0)
#endif
+/*
+ * User space objects and their references.
+ */
+
+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+typedef enum {
+ drm_fence_type,
+ drm_buffer_type
+
+ /*
+ * Add other user space object types here.
+ */
+
+} drm_object_type_t;
+
+
+
+
+/*
+ * A user object is a structure that helps the drm give out user handles
+ * to kernel internal objects and to keep track of these objects so that
+ * they can be destroyed, for example when the user space process exits.
+ * Designed to be accessible using a user space 32-bit handle.
+ */
+
+typedef struct drm_user_object{
+ drm_hash_item_t hash;
+ struct list_head list;
+ drm_object_type_t type;
+ atomic_t refcount;
+ int shareable;
+ drm_file_t *owner;
+ void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj,
+ drm_ref_t ref_action);
+ void (*unref)(drm_file_t *priv, struct drm_user_object *obj,
+ drm_ref_t unref_action);
+ void (*remove)(drm_file_t *priv, struct drm_user_object *obj);
+} drm_user_object_t;
+
+/*
+ * A ref object is a structure used to keep track of references to user
+ * objects, so that those references can be destroyed, for example when the
+ * user space process exits. Designed to be accessible using a pointer to
+ * the _user_ object.
+ */
+
+
+typedef struct drm_ref_object {
+ drm_hash_item_t hash;
+ struct list_head list;
+ atomic_t refcount;
+ drm_ref_t unref_action;
+} drm_ref_object_t;
+
+typedef struct drm_fence_object{
+ drm_user_object_t base;
+ atomic_t usage;
+
+ /*
+ * The below three fields are protected by the fence manager spinlock.
+ */
+
+ struct list_head ring;
+ int class;
+ volatile uint32_t type;
+ volatile uint32_t signaled;
+ uint32_t sequence;
+ volatile uint32_t flush_mask;
+ volatile uint32_t submitted_flush;
+} drm_fence_object_t;
+
+
+
+
+
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
@@ -837,6 +1004,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
+extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
/* Memory management support (drm_memory.h) */
#include "drm_memory.h"
@@ -915,6 +1083,13 @@ extern int drm_unlock(struct inode *inode, struct file *filp,
extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
extern int drm_lock_free(drm_device_t * dev,
__volatile__ unsigned int *lock, unsigned int context);
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+extern int drm_i_have_hw_lock(struct file *filp);
+extern int drm_kernel_take_hw_lock(struct file *filp);
/* Buffer management support (drm_bufs.h) */
extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
@@ -999,6 +1174,8 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
+extern drm_ttm_backend_t *drm_agp_init_ttm_cached(struct drm_device *dev);
+extern drm_ttm_backend_t *drm_agp_init_ttm_uncached(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
@@ -1058,6 +1235,70 @@ extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
extern void drm_mm_takedown(drm_mm_t *mm);
+/*
+ * User space object bookkeeping (drm_object.c)
+ */
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,
+			       int shareable);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key);
+
+/*
+ * Must be called with the struct_mutex held. If "item" has been obtained by
+ * a call to drm_lookup_user_object, you may not release the struct_mutex
+ * before calling drm_remove_user_object. This function may temporarily
+ * release the struct_mutex.
+ */
+
+extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item);
+
+/*
+ * Must be called with the struct_mutex held. May temporarily release it.
+ */
+
+extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object,
+ drm_ref_t ref_action);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv,
+ drm_user_object_t *referenced_object,
+ drm_ref_t ref_action);
+/*
+ * Must be called with the struct_mutex held. If "item" has been obtained by
+ * a call to drm_lookup_ref_object, you may not release the struct_mutex
+ * before calling drm_remove_ref_object. This function may temporarily
+ * release the struct_mutex.
+ */
+
+extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item);
+extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type,
+ drm_user_object_t **object);
+extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
+
+
+
+/*
+ * fence objects (drm_fence.c)
+ */
+
+extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
+extern void drm_fence_manager_init(drm_device_t *dev);
+extern void drm_fence_manager_takedown(drm_device_t *dev);
+extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
+extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
+
+
/* Inline replacements for DRM_IOREMAP macros */
static __inline__ void drm_core_ioremap(struct drm_map *map,
struct drm_device *dev)
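
The two hook tables added above (drm_fence_driver_t and drm_bo_driver_t) are what the new i915_fence.c and i915_buffer.c files in the diffstat fill in; their contents are not shown in this excerpt, so the following is only a hypothetical sketch of the shape such a definition takes, with every value and function name invented for illustration.

/* Hypothetical driver wiring -- the real i915 hooks live in i915_fence.c,
 * i915_buffer.c and i915_drv.c and may differ in every detail. */
static int example_fence_emit(struct drm_device *dev, uint32_t *breadcrumb)
{
	/* A real implementation writes a breadcrumb into the ring and
	 * returns the new sequence number through *breadcrumb. */
	*breadcrumb = 0;
	return 0;
}

static void example_poke_flush(struct drm_device *dev)
{
	/* A real implementation arranges for the bits in fm->pending_flush
	 * to be serviced, e.g. by emitting a flush from the IRQ handler. */
}

static drm_fence_driver_t example_fence_driver = {
	.no_types = 1,			/* just the EXE type */
	.wrap_diff = (1U << 30),	/* invented: well under half the space */
	.flush_diff = (1U << 29),	/* invented: flush long before wrap */
	.sequence_mask = 0xffffffffU,
	.lazy_capable = 1,		/* IRQ-driven waits available */
	.emit = example_fence_emit,
	.poke_flush = example_poke_flush,
};
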
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index dce27cdf..e7226f1f 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -552,4 +552,169 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
+/*
+ * AGP ttm backend interface.
+ */
+
+static int drm_agp_needs_cache_adjust_true(drm_ttm_backend_t *backend) {
+ return TRUE;
+}
+
+static int drm_agp_needs_cache_adjust_false(drm_ttm_backend_t *backend) {
+ return FALSE;
+}
+
+#define AGP_MEM_USER (1 << 16)
+#define AGP_MEM_UCACHED (2 << 16)
+
+static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
+ struct page **pages) {
+
+ drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+ struct page **cur_page, **last_page = pages + num_pages;
+ DRM_AGP_MEM *mem;
+
+ DRM_DEBUG("drm_agp_populate_ttm\n");
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+ mem = drm_agp_allocate_memory(num_pages, agp_priv->mem_type);
+#else
+ mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->mem_type);
+#endif
+ if (!mem)
+ return -1;
+
+ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
+ mem->page_count = 0;
+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
+ mem->memory[mem->page_count++] = page_to_phys(*cur_page);
+ }
+ agp_priv->mem = mem;
+ return 0;
+}
+
+static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, unsigned long offset) {
+
+ drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+ DRM_AGP_MEM *mem = agp_priv->mem;
+ int ret;
+
+ DRM_DEBUG("drm_agp_bind_ttm\n");
+ mem->is_flushed = FALSE;
+ ret = drm_agp_bind_memory(mem, offset);
+ if (ret) {
+ DRM_ERROR("AGP Bind memory failed\n");
+ }
+ return ret;
+}
+
+static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
+
+ drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+
+ DRM_DEBUG("drm_agp_unbind_ttm\n");
+ if (agp_priv->mem->is_bound)
+ return drm_agp_unbind_memory(agp_priv->mem);
+ else
+ return 0;
+}
+
+static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
+
+ drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+ DRM_AGP_MEM *mem = agp_priv->mem;
+
+ DRM_DEBUG("drm_agp_clear_ttm\n");
+ if (mem) {
+ if (mem->is_bound) {
+ drm_agp_unbind_memory(mem);
+ }
+ agp_free_memory(mem);
+ }
+ agp_priv->mem = NULL;
+}
+
+static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
+
+ drm_agp_ttm_priv *agp_priv;
+
+ if (backend) {
+ DRM_DEBUG("drm_agp_destroy_ttm\n");
+ agp_priv = (drm_agp_ttm_priv *) backend->private;
+ if (agp_priv) {
+ if (agp_priv->mem) {
+ drm_agp_clear_ttm(backend);
+ }
+ drm_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+ }
+ drm_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
+ }
+}
+
+
+drm_ttm_backend_t *drm_agp_init_ttm_uncached(struct drm_device *dev) {
+
+ drm_ttm_backend_t *agp_be;
+ drm_agp_ttm_priv *agp_priv;
+
+ agp_be = drm_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+
+ if (!agp_be)
+ return NULL;
+
+	agp_priv = drm_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+
+ if (!agp_priv) {
+ drm_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+ return NULL;
+ }
+
+ agp_priv->mem = NULL;
+ agp_priv->mem_type = AGP_MEM_USER;
+ agp_priv->bridge = dev->agp->bridge;
+ agp_priv->populated = FALSE;
+ agp_be->aperture_base = dev->agp->agp_info.aper_base;
+ agp_be->private = (void *) agp_priv;
+ agp_be->needs_cache_adjust = drm_agp_needs_cache_adjust_true;
+ agp_be->populate = drm_agp_populate;
+ agp_be->clear = drm_agp_clear_ttm;
+ agp_be->bind = drm_agp_bind_ttm;
+ agp_be->unbind = drm_agp_unbind_ttm;
+ agp_be->destroy = drm_agp_destroy_ttm;
+ return agp_be;
+}
+EXPORT_SYMBOL(drm_agp_init_ttm_uncached);
+
+drm_ttm_backend_t *drm_agp_init_ttm_cached(struct drm_device *dev) {
+
+ drm_ttm_backend_t *agp_be;
+ drm_agp_ttm_priv *agp_priv;
+
+ agp_be = drm_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+
+ if (!agp_be)
+ return NULL;
+
+	agp_priv = drm_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+
+ if (!agp_priv) {
+ drm_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+ return NULL;
+ }
+
+ agp_priv->mem = NULL;
+ agp_priv->mem_type = AGP_MEM_UCACHED;
+ agp_priv->bridge = dev->agp->bridge;
+ agp_priv->populated = FALSE;
+ agp_be->aperture_base = dev->agp->agp_info.aper_base;
+ agp_be->private = (void *) agp_priv;
+ agp_be->needs_cache_adjust = drm_agp_needs_cache_adjust_false;
+ agp_be->populate = drm_agp_populate;
+ agp_be->clear = drm_agp_clear_ttm;
+ agp_be->bind = drm_agp_bind_ttm;
+ agp_be->unbind = drm_agp_unbind_ttm;
+ agp_be->destroy = drm_agp_destroy_ttm;
+ return agp_be;
+}
+EXPORT_SYMBOL(drm_agp_init_ttm_cached);
+
#endif /* __OS_HAS_AGP */
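
With the two AGP backends in place, a driver's create_ttm_backend_entry hook (see drm_bo_driver_t in drmP.h above) reduces to choosing between them. Hypothetical glue only; the real i915 hook lives in i915_buffer.c, which is not shown in this excerpt.

/* Illustrative sketch of a driver-side hook picking between the cached
 * and uncached AGP TTM backends added above. */
static drm_ttm_backend_t *example_create_ttm_backend(struct drm_device *dev,
						      int cached)
{
	return cached ? drm_agp_init_ttm_cached(dev) :
			drm_agp_init_ttm_uncached(dev);
}

static drm_bo_driver_t example_bo_driver = {
	.cached_pages = 1,
	.create_ttm_backend_entry = example_create_ttm_backend,
};
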
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index 13b0b174..29983060 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -422,6 +422,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
+ case _DRM_TTM:
+ BUG_ON(1);
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
new file mode 100644
index 00000000..cdef4b97
--- /dev/null
+++ b/linux-core/drm_compat.c
@@ -0,0 +1,140 @@
+/**************************************************************************
+ *
+ * This kernel module is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ **************************************************************************/
+/*
+ * This code provides access to unexported mm kernel features. It is necessary
+ * to use the new DRM memory manager code with kernels that don't support it
+ * directly.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * Linux kernel mm subsystem authors.
+ * (Most code taken from there).
+ */
+
+#include "drmP.h"
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#ifdef MODULE
+void pgd_clear_bad(pgd_t * pgd)
+{
+ pgd_ERROR(*pgd);
+ pgd_clear(pgd);
+}
+
+void pud_clear_bad(pud_t * pud)
+{
+ pud_ERROR(*pud);
+ pud_clear(pud);
+}
+
+void pmd_clear_bad(pmd_t * pmd)
+{
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+}
+#endif
+
+static inline void change_pte_range(struct mm_struct *mm, pmd_t * pmd,
+ unsigned long addr, unsigned long end)
+{
+ pte_t *pte;
+
+ pte = pte_offset_map(pmd, addr);
+ do {
+ if (pte_present(*pte)) {
+ pte_t ptent;
+ ptent = *pte;
+ ptep_get_and_clear(mm, addr, pte);
+ lazy_mmu_prot_update(ptent);
+ }
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ pte_unmap(pte - 1);
+}
+
+static inline void change_pmd_range(struct mm_struct *mm, pud_t * pud,
+ unsigned long addr, unsigned long end)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ change_pte_range(mm, pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
+}
+
+static inline void change_pud_range(struct mm_struct *mm, pgd_t * pgd,
+ unsigned long addr, unsigned long end)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ change_pmd_range(mm, pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
+
+/*
+ * This function should be called with all relevant spinlocks held.
+ */
+
+void drm_clear_vma(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pgd_t *pgd;
+ unsigned long next;
+#if defined(flush_tlb_mm) || !defined(MODULE)
+ unsigned long start = addr;
+#endif
+ BUG_ON(addr >= end);
+ pgd = pgd_offset(mm, addr);
+ flush_cache_range(vma, addr, end);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ change_pud_range(mm, pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+#if defined(flush_tlb_mm) || !defined(MODULE)
+	flush_tlb_range(vma, start, end);
+#endif
+}
+
+pgprot_t drm_prot_map(uint32_t flags)
+{
+#ifdef MODULE
+ static pgprot_t drm_protection_map[16] = {
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ };
+
+ return drm_protection_map[flags & 0x0F];
+#else
+ extern pgprot_t protection_map[];
+ return protection_map[flags & 0x0F];
+#endif
+};
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 407853d7..80928319 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -227,4 +227,22 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
}
#endif
+#include <linux/mm.h>
+#include <asm/page.h>
+
+/*
+ * Flush relevant caches and clear a VMA structure so that page references
+ * will cause a page fault. Don't flush tlbs.
+ */
+
+extern void drm_clear_vma(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end);
+
+/*
+ * Return the PTE protection map entries for the VMA flags given by
+ * flags. This is a functional interface to the kernel's protection map.
+ */
+
+extern pgprot_t drm_prot_map(uint32_t flags);
+
#endif
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 9712170b..e6ae690a 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -119,6 +119,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
};
#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls )
@@ -347,10 +348,13 @@ static void __exit drm_cleanup(drm_device_t * dev)
drm_lastclose(dev);
+ drm_fence_manager_takedown(dev);
+
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
drm_ht_remove(&dev->map_hash);
+ drm_ht_remove(&dev->object_hash);
}
if (!drm_fb_loaded)
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
new file mode 100644
index 00000000..cfcda2b2
--- /dev/null
+++ b/linux-core/drm_fence.c
@@ -0,0 +1,589 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+static void drm_fm_update_pointers(drm_fence_manager_t * fm,
+ struct list_head *list, int no_types,
+ uint32_t type)
+{
+ int i;
+ for (i = 0; i < no_types; ++i) {
+ if (type & (1 << i)) {
+ fm->fence_types[i] = list;
+ }
+ }
+}
+
+/*
+ * Typically called by the IRQ handler.
+ */
+
+void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
+{
+ int i;
+ int wake = 0;
+ int largest = 0;
+ uint32_t diff;
+ uint32_t relevant;
+ int index = 0;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ struct list_head *list;
+ struct list_head *fence_list;
+ drm_fence_object_t *fence;
+ int found = 0;
+
+ for (i = 0; i < driver->no_types; ++i) {
+ if (!(type & (1 << i)))
+ continue;
+
+ list = fm->fence_types[i];
+ fence_list = list->next;
+
+ if (fence_list == &fm->ring)
+ continue;
+
+ fence = list_entry(fence_list, drm_fence_object_t, ring);
+
+ diff = (sequence - fence->sequence) & driver->sequence_mask;
+
+ if (diff < driver->wrap_diff) {
+ if (diff >= largest) {
+ largest = diff;
+ index = i;
+ found = 1;
+ }
+ }
+ }
+
+ if (!found)
+ return;
+
+ /*
+ * Start with the fence object with the lowest sequence number, affected by
+	 * the type mask of this call. Update the signaled fields and check
+	 * whether we need to wake sleeping processes.
+ */
+
+ list = fm->fence_types[index]->next;
+ do {
+ if (list == &fm->ring) {
+ drm_fm_update_pointers(fm, list->prev,
+ driver->no_types, type);
+ break;
+ }
+ fence = list_entry(list, drm_fence_object_t, ring);
+ diff = (sequence - fence->sequence) & driver->sequence_mask;
+ if (diff >= driver->wrap_diff) {
+ drm_fm_update_pointers(fm, fence->ring.prev,
+ driver->no_types, type);
+ break;
+ }
+ relevant = type & fence->type;
+ if ((fence->signaled | relevant) != fence->signaled) {
+ fence->signaled |= relevant;
+ fence->submitted_flush |= relevant;
+ wake = 1;
+ }
+
+ relevant = fence->flush_mask &
+ ~(fence->signaled | fence->submitted_flush);
+ if (relevant) {
+ fm->pending_flush |= relevant;
+ fence->submitted_flush = fence->flush_mask;
+ }
+
+ list = list->next;
+
+ /*
+ * Remove a completely signaled fence from the
+ * fence manager ring.
+ */
+
+ if (!(fence->type & ~fence->signaled)) {
+ fence_list = &fence->ring;
+ for (i = 0; i < driver->no_types; ++i) {
+ if (fm->fence_types[i] == fence_list)
+ fm->fence_types[i] = fence_list->prev;
+ }
+ list_del_init(fence_list);
+ }
+
+ } while (1);
+
+ /*
+ * Wake sleeping processes.
+ */
+
+ if (wake) {
+ DRM_WAKEUP(&fm->fence_queue);
+ }
+}
+
+EXPORT_SYMBOL(drm_fence_handler);
+
+static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ unsigned long flags;
+ int i;
+
+ write_lock_irqsave(&fm->lock, flags);
+ for (i = 0; i < driver->no_types; ++i) {
+ if (fm->fence_types[i] == ring)
+ fm->fence_types[i] = ring->prev;
+ }
+ list_del_init(ring);
+ write_unlock_irqrestore(&fm->lock, flags);
+}
+
+void drm_fence_usage_deref_locked(drm_device_t * dev,
+ drm_fence_object_t * fence)
+{
+ if (atomic_dec_and_test(&fence->usage)) {
+ drm_fence_unring(dev, &fence->ring);
+ drm_free(fence, sizeof(*fence), DRM_MEM_FENCE);
+ }
+}
+
+void drm_fence_usage_deref_unlocked(drm_device_t * dev,
+ drm_fence_object_t * fence)
+{
+ if (atomic_dec_and_test(&fence->usage)) {
+ mutex_lock(&dev->struct_mutex);
+ if (atomic_read(&fence->usage) == 0) {
+ drm_fence_unring(dev, &fence->ring);
+ drm_free(fence, sizeof(*fence), DRM_MEM_FENCE);
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+static void drm_fence_object_destroy(drm_file_t * priv,
+ drm_user_object_t * base)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_fence_object_t *fence =
+ drm_user_object_entry(base, drm_fence_object_t, base);
+
+ drm_fence_usage_deref_locked(dev, fence);
+}
+
+static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
+ uint32_t mask, int poke_flush)
+{
+ unsigned long flags;
+ int signaled;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+
+ if (poke_flush)
+ driver->poke_flush(dev);
+ read_lock_irqsave(&fm->lock, flags);
+ signaled =
+ (fence->type & mask & fence->signaled) == (fence->type & mask);
+ read_unlock_irqrestore(&fm->lock, flags);
+
+ return signaled;
+}
+
+static void drm_fence_flush_exe(drm_fence_manager_t * fm,
+ drm_fence_driver_t * driver, uint32_t sequence)
+{
+ uint32_t diff;
+
+ if (!fm->pending_exe_flush) {
+ struct list_head *list;
+
+ /*
+ * Last_exe_flush is invalid. Find oldest sequence.
+ */
+
+ list = fm->fence_types[_DRM_FENCE_TYPE_EXE];
+ if (list->next == &fm->ring) {
+ return;
+ } else {
+ drm_fence_object_t *fence =
+ list_entry(list->next, drm_fence_object_t, ring);
+ fm->last_exe_flush = (fence->sequence - 1) &
+ driver->sequence_mask;
+ }
+ diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
+ if (diff >= driver->wrap_diff)
+ return;
+ fm->exe_flush_sequence = sequence;
+ fm->pending_exe_flush = 1;
+ } else {
+ diff =
+ (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
+ if (diff < driver->wrap_diff) {
+ fm->exe_flush_sequence = sequence;
+ }
+ }
+}
+
+/*
+ * Make sure old fence objects are signaled before their fence sequences are
+ * wrapped around and reused.
+ */
+
+static int drm_fence_object_flush(drm_device_t * dev,
+ drm_fence_object_t * fence, uint32_t type)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ unsigned long flags;
+
+ if (type & ~fence->type) {
+ DRM_ERROR("Flush trying to extend fence type\n");
+ return -EINVAL;
+ }
+
+ write_lock_irqsave(&fm->lock, flags);
+ fence->flush_mask |= type;
+ if (fence->submitted_flush == fence->signaled) {
+ if ((fence->type & DRM_FENCE_EXE) &&
+ !(fence->submitted_flush & DRM_FENCE_EXE)) {
+ drm_fence_flush_exe(fm, driver, fence->sequence);
+ fence->submitted_flush |= DRM_FENCE_EXE;
+ } else {
+ fm->pending_flush |= (fence->flush_mask &
+ ~fence->submitted_flush);
+ fence->submitted_flush = fence->flush_mask;
+ }
+ }
+ write_unlock_irqrestore(&fm->lock, flags);
+ driver->poke_flush(dev);
+ return 0;
+}
+
+void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ uint32_t old_sequence;
+ unsigned long flags;
+ drm_fence_object_t *fence;
+ uint32_t diff;
+
+ mutex_lock(&dev->struct_mutex);
+ read_lock_irqsave(&fm->lock, flags);
+ if (fm->ring.next == &fm->ring) {
+ read_unlock_irqrestore(&fm->lock, flags);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+ old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
+ fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
+ atomic_inc(&fence->usage);
+ mutex_unlock(&dev->struct_mutex);
+ diff = (old_sequence - fence->sequence) & driver->sequence_mask;
+ read_unlock_irqrestore(&fm->lock, flags);
+ if (diff < driver->wrap_diff) {
+ drm_fence_object_flush(dev, fence, fence->type);
+ }
+ drm_fence_usage_deref_unlocked(dev, fence);
+}
+
+EXPORT_SYMBOL(drm_fence_flush_old);
+
+static int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
+ int lazy, int ignore_signals, uint32_t mask)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ int ret = 0;
+ unsigned long _end;
+
+ if (mask & ~fence->type) {
+ DRM_ERROR("Wait trying to extend fence type\n");
+ return -EINVAL;
+ }
+
+ if (fence_signaled(dev, fence, mask, 0))
+ return 0;
+
+ _end = jiffies + 3 * DRM_HZ;
+
+ drm_fence_object_flush(dev, fence, mask);
+ if (lazy && driver->lazy_capable) {
+ do {
+ DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
+ fence_signaled(dev, fence, mask, 1));
+ if (time_after_eq(jiffies, _end))
+ break;
+ } while (ret == -EINTR && ignore_signals);
+
+ if (time_after_eq(jiffies, _end) && (ret != 0))
+ ret = -EBUSY;
+ return ret;
+
+ } else {
+ int signaled;
+ do {
+ signaled = fence_signaled(dev, fence, mask, 1);
+ } while (!signaled && !time_after_eq(jiffies, _end));
+ if (!signaled)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
+ uint32_t type)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ unsigned long flags;
+ uint32_t sequence;
+ int ret;
+
+ drm_fence_unring(dev, &fence->ring);
+ ret = driver->emit(dev, &sequence);
+ if (ret)
+ return ret;
+
+ write_lock_irqsave(&fm->lock, flags);
+ fence->type = type;
+ fence->flush_mask = 0x00;
+ fence->submitted_flush = 0x00;
+ fence->signaled = 0x00;
+ fence->sequence = sequence;
+ list_add_tail(&fence->ring, &fm->ring);
+ write_unlock_irqrestore(&fm->lock, flags);
+ return 0;
+}
+
+int drm_fence_object_init(drm_device_t * dev, uint32_t type, int emit,
+ drm_fence_object_t * fence)
+{
+ int ret = 0;
+ unsigned long flags;
+ drm_fence_manager_t *fm = &dev->fm;
+
+ mutex_lock(&dev->struct_mutex);
+ atomic_set(&fence->usage, 1);
+ mutex_unlock(&dev->struct_mutex);
+
+ write_lock_irqsave(&fm->lock, flags);
+ INIT_LIST_HEAD(&fence->ring);
+ fence->class = 0;
+ fence->type = type;
+ fence->flush_mask = 0;
+ fence->submitted_flush = 0;
+ fence->signaled = 0;
+ fence->sequence = 0;
+ write_unlock_irqrestore(&fm->lock, flags);
+ if (emit) {
+ ret = drm_fence_object_emit(dev, fence, type);
+ }
+ return ret;
+}
+
+EXPORT_SYMBOL(drm_fence_object_init);
+
+static int drm_fence_object_create(drm_file_t * priv, uint32_t type,
+ int emit, int shareable,
+ uint32_t * user_handle,
+ drm_fence_object_t ** c_fence)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+ drm_fence_object_t *fence;
+
+ fence = drm_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
+ if (!fence)
+ return -ENOMEM;
+ ret = drm_fence_object_init(dev, type, emit, fence);
+ if (ret) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ return ret;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_add_user_object(priv, &fence->base, shareable);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ *c_fence = NULL;
+ *user_handle = 0;
+ return ret;
+ }
+ fence->base.type = drm_fence_type;
+ fence->base.remove = &drm_fence_object_destroy;
+ *user_handle = fence->base.hash.key;
+ *c_fence = fence;
+
+ return 0;
+}
+
+void drm_fence_manager_init(drm_device_t * dev)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *fed = dev->driver->fence_driver;
+ int i;
+
+ fm->lock = RW_LOCK_UNLOCKED;
+ INIT_LIST_HEAD(&fm->ring);
+ fm->pending_flush = 0;
+ DRM_INIT_WAITQUEUE(&fm->fence_queue);
+ fm->initialized = 0;
+ if (fed) {
+ fm->initialized = 1;
+ for (i = 0; i < fed->no_types; ++i) {
+ fm->fence_types[i] = &fm->ring;
+ }
+ }
+}
+
+void drm_fence_manager_takedown(drm_device_t * dev)
+{
+}
+
+drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_user_object_t *uo;
+ drm_fence_object_t *fence;
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, handle);
+ if (!uo || (uo->type != drm_fence_type)) {
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+ }
+ fence = drm_user_object_entry(uo, drm_fence_object_t, base);
+ atomic_inc(&fence->usage);
+ mutex_unlock(&dev->struct_mutex);
+ return fence;
+}
+
+int drm_fence_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_fence_object_t *fence;
+ drm_user_object_t *uo;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+ switch (arg.op) {
+ case drm_fence_create:{
+ int emit = arg.flags & DRM_FENCE_FLAG_EMIT;
+ if (emit)
+ LOCK_TEST_WITH_RETURN(dev, filp);
+			ret = drm_fence_object_create(priv, arg.type, emit,
+						      arg.flags &
+						      DRM_FENCE_FLAG_SHAREABLE,
+						      &arg.handle, &fence);
+ if (ret)
+ return ret;
+ mutex_lock(&dev->struct_mutex);
+ atomic_inc(&fence->usage);
+ mutex_unlock(&dev->struct_mutex);
+ break;
+ }
+ case drm_fence_destroy:
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, arg.handle);
+ if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+ ret = drm_remove_user_object(priv, uo);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ case drm_fence_reference:
+ ret =
+ drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
+ if (ret)
+ return ret;
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ break;
+ case drm_fence_unreference:
+ ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
+ return ret;
+ case drm_fence_signaled:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ break;
+ case drm_fence_flush:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_flush(dev, fence, arg.type);
+ break;
+ case drm_fence_wait:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+		ret = drm_fence_object_wait(dev, fence,
+					    arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
+					    arg.flags &
+					    DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS,
+					    arg.type);
+ break;
+ case drm_fence_emit:
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_emit(dev, fence, arg.type);
+ break;
+ default:
+ return -EINVAL;
+ }
+ read_lock_irqsave(&fm->lock, flags);
+ arg.class = fence->class;
+ arg.type = fence->type;
+ arg.signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(dev, fence);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return ret;
+}
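
One idiom above is worth spelling out: fences are ordered by unsigned wrap-around subtraction masked to the breadcrumb width and compared against wrap_diff, so the scheme keeps working when the hardware sequence counter wraps. A standalone restatement (illustrative only, not part of the patch; the function name is invented):

/* Wrap-safe "has the hardware reached sequence 'wanted' yet?" test, as
 * used by drm_fence_handler() and drm_fence_flush_old() above.  With a
 * full 32-bit sequence space and a wrap_diff of 1 << 31, 'wanted' counts
 * as reached while it trails the last reported sequence by less than
 * half the sequence space. */
static int example_seq_reached(uint32_t reported, uint32_t wanted,
			       uint32_t sequence_mask, uint32_t wrap_diff)
{
	uint32_t diff = (reported - wanted) & sequence_mask;

	return diff < wrap_diff;
}
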
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 691edff9..10516bdd 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -47,6 +47,7 @@ static int drm_setup(drm_device_t * dev)
int i;
int ret;
+
if (dev->driver->firstopen) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
@@ -56,6 +57,7 @@ static int drm_setup(drm_device_t * dev)
dev->magicfree.next = NULL;
/* prebuild the SAREA */
+
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
@@ -233,6 +235,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
int minor = iminor(inode);
drm_file_t *priv;
int ret;
+	int i, j;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
@@ -256,6 +259,22 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
+ INIT_LIST_HEAD(&priv->user_objects);
+ INIT_LIST_HEAD(&priv->refd_objects);
+
+ for (i=0; i<_DRM_NO_REF_TYPES; ++i) {
+ ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ for(j=0; j<i; ++j) {
+ drm_ht_remove(&priv->refd_object_hash[j]);
+ }
+ goto out_free;
+ }
+
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -320,6 +339,53 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
+static void drm_object_release(struct file *filp) {
+
+ drm_file_t *priv = filp->private_data;
+ struct list_head *head;
+ drm_user_object_t *user_object;
+ drm_ref_object_t *ref_object;
+ int i;
+
+ /*
+ * Free leftover ref objects created by me. Note that we cannot use
+ * list_for_each() here, as the struct_mutex may be temporarily released
+ * by the remove_() functions, and thus the lists may be altered.
+ * Also, a drm_remove_ref_object() will not remove it
+ * from the list unless its refcount is 1.
+ */
+
+ head = &priv->refd_objects;
+ while (head->next != head) {
+ ref_object = list_entry(head->next, drm_ref_object_t, list);
+ drm_remove_ref_object(priv, ref_object);
+ head = &priv->refd_objects;
+ }
+
+ /*
+ * Free leftover user objects created by me.
+ */
+
+ head = &priv->user_objects;
+ while (head->next != head) {
+ user_object = list_entry(head->next, drm_user_object_t, list);
+ drm_remove_user_object(priv, user_object);
+ head = &priv->user_objects;
+ }
+
+
+
+
+ for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
+ drm_ht_remove(&priv->refd_object_hash[i]);
+ }
+}
+
+
+
+
+
+
/**
* Release file.
*
@@ -354,58 +420,24 @@ int drm_release(struct inode *inode, struct file *filp)
current->pid, (long)old_encode_dev(priv->head->device),
dev->open_count);
- if (priv->lock_count && dev->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.filp == filp) {
+ if (dev->driver->reclaim_buffers_locked) {
+ retcode = drm_kernel_take_hw_lock(filp);
+ if (!retcode) {
+ dev->driver->reclaim_buffers_locked(dev, filp);
+
+ drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ }
+
+ } else if (drm_i_have_hw_lock(filp)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- if (dev->driver->reclaim_buffers_locked)
- dev->driver->reclaim_buffers_locked(dev, filp);
-
drm_lock_free(dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
- /* FIXME: may require heavy-handed reset of
- hardware at this point, possibly
- processed via a callback to the X
- server. */
- } else if (dev->driver->reclaim_buffers_locked && priv->lock_count
- && dev->lock.hw_lock) {
- /* The lock is required to reclaim buffers */
- DECLARE_WAITQUEUE(entry, current);
-
- add_wait_queue(&dev->lock.lock_queue, &entry);
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!dev->lock.hw_lock) {
- /* Device has been unregistered */
- retcode = -EINTR;
- break;
- }
- if (drm_lock_take(&dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- dev->lock.filp = filp;
- dev->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
- break; /* Got lock */
- }
- /* Contention */
- schedule();
- if (signal_pending(current)) {
- retcode = -ERESTARTSYS;
- break;
- }
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev->lock.lock_queue, &entry);
- if (!retcode) {
- dev->driver->reclaim_buffers_locked(dev, filp);
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT);
- }
}
+
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked) {
dev->driver->reclaim_buffers(dev, filp);
@@ -435,6 +467,7 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_unlock(&dev->ctxlist_mutex);
mutex_lock(&dev->struct_mutex);
+ drm_object_release(filp);
if (priv->remove_auth_on_close == 1) {
drm_file_t *temp = dev->file_first;
while (temp) {
diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c
index a0b2d680..63ee5f91 100644
--- a/linux-core/drm_hashtab.c
+++ b/linux-core/drm_hashtab.c
@@ -43,7 +43,16 @@ int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
ht->size = 1 << order;
ht->order = order;
ht->fill = 0;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
+ ht->table = NULL;
+ ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > 4*PAGE_SIZE);
+ if (!ht->use_vmalloc) {
+ ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+ DRM_MEM_HASHTAB);
+ }
+ if (!ht->table) {
+ ht->use_vmalloc = 1;
+ ht->table = vmalloc(ht->size*sizeof(*ht->table));
+ }
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
@@ -183,7 +192,11 @@ int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
void drm_ht_remove(drm_open_hash_t *ht)
{
if (ht->table) {
- vfree(ht->table);
+ if (ht->use_vmalloc)
+ vfree(ht->table);
+ else
+ drm_free(ht->table, ht->size*sizeof(*ht->table),
+ DRM_MEM_HASHTAB);
ht->table = NULL;
}
}
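
For intuition, the new allocation policy reduces to the predicate below, a restatement of the size check in drm_ht_create() rather than a new helper. With DRM_FILE_HASH_ORDER == 8, each per-file reference hash table added in drm_fops.c needs (1 << 8) * sizeof(struct hlist_head) = 2 KB on a 64-bit kernel, well under 4 * PAGE_SIZE, so those tables now come from drm_calloc() instead of vmalloc().

/* Illustrative only -- mirrors the size check in drm_ht_create() above. */
static int example_ht_wants_vmalloc(unsigned int order)
{
	return ((1UL << order) * sizeof(struct hlist_head)) > 4 * PAGE_SIZE;
}
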
diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h
index 40afec05..613091c9 100644
--- a/linux-core/drm_hashtab.h
+++ b/linux-core/drm_hashtab.h
@@ -47,6 +47,7 @@ typedef struct drm_open_hash{
unsigned int order;
unsigned int fill;
struct hlist_head *table;
+ int use_vmalloc;
} drm_open_hash_t;
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index a268d8ee..c12e4897 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -308,3 +308,60 @@ static int drm_notifier(void *priv)
} while (prev != old);
return 0;
}
+
+/*
+ * Can be used by drivers to take the hardware lock if necessary.
+ * (Waiting for idle before reclaiming buffers etc.)
+ */
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+ DRM_DEVICE;
+
+ return (priv->lock_count && dev->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+ dev->lock.filp == filp);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
+
+int drm_kernel_take_hw_lock(struct file *filp)
+{
+ DRM_DEVICE;
+
+ int ret = 0;
+
+ if (!drm_i_have_hw_lock(filp)) {
+
+ DECLARE_WAITQUEUE(entry, current);
+
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ dev->lock.filp = filp;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+ /* Contention */
+ schedule();
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+ return ret;
+}
+
+EXPORT_SYMBOL(drm_kernel_take_hw_lock);
+
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
new file mode 100644
index 00000000..b928c01e
--- /dev/null
+++ b/linux-core/drm_object.c
@@ -0,0 +1,289 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+
+int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
+ int shareable)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ atomic_set(&item->refcount, 1);
+ item->shareable = shareable;
+ item->owner = priv;
+
+ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
+ (unsigned long)item, 32, 0, 0);
+ if (ret)
+ return ret;
+
+ list_add_tail(&item->list, &priv->user_objects);
+ return 0;
+}
+
+drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_hash_item_t *hash;
+ int ret;
+ drm_user_object_t *item;
+
+ ret = drm_ht_find_item(&dev->object_hash, key, &hash);
+ if (ret) {
+ return NULL;
+ }
+ item = drm_hash_entry(hash, drm_user_object_t, hash);
+
+ if (priv != item->owner) {
+ drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE];
+ ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
+ if (ret) {
+ DRM_ERROR("Object not registered for usage\n");
+ return NULL;
+ }
+ }
+ return item;
+}
+
+static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ if (atomic_dec_and_test(&item->refcount)) {
+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
+ BUG_ON(ret);
+ list_del_init(&item->list);
+ item->remove(priv, item);
+ }
+}
+
+int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
+{
+ if (item->owner != priv) {
+ DRM_ERROR("Cannot destroy object not owned by you.\n");
+ return -EINVAL;
+ }
+ item->owner = 0;
+ item->shareable = 0;
+ list_del_init(&item->list);
+ drm_deref_user_object(priv, item);
+ return 0;
+}
+
+static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro,
+ drm_ref_t action)
+{
+ int ret = 0;
+
+ switch (action) {
+ case _DRM_REF_USE:
+ atomic_inc(&ro->refcount);
+ break;
+ default:
+ if (!ro->ref_struct_locked) {
+ DRM_ERROR("Register object called without register"
+ " capabilities\n");
+ ret = -EINVAL;
+ break;
+ } else {
+ ro->ref_struct_locked(priv, ro, action);
+ }
+ }
+ return ret;
+}
+
+int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
+ drm_ref_t ref_action)
+{
+ int ret = 0;
+ drm_ref_object_t *item;
+ drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];
+
+ if (!referenced_object->shareable && priv != referenced_object->owner) {
+ DRM_ERROR("Not allowed to reference this object\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * If this is not a usage reference, check that usage has been registered
+ * first. Otherwise strange things may happen on destruction.
+ */
+
+ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
+ item =
+ drm_lookup_ref_object(priv, referenced_object,
+ _DRM_REF_USE);
+ if (!item) {
+ DRM_ERROR
+ ("Object not registered for usage by this client\n");
+ return -EINVAL;
+ }
+ }
+
+ if (NULL !=
+ (item =
+ drm_lookup_ref_object(priv, referenced_object, ref_action))) {
+ atomic_inc(&item->refcount);
+ return drm_object_ref_action(priv, referenced_object,
+ ref_action);
+ }
+
+ item = drm_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
+ if (item == NULL) {
+ DRM_ERROR("Could not allocate reference object\n");
+ return -ENOMEM;
+ }
+
+ atomic_set(&item->refcount, 1);
+ item->hash.key = (unsigned long)referenced_object;
+ ret = drm_ht_insert_item(ht, &item->hash);
+
+ if (ret)
+ goto out;
+
+ list_add(&item->list, &priv->refd_objects);
+ ret = drm_object_ref_action(priv, referenced_object, ref_action);
+ out:
+ return ret;
+}
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
+ drm_user_object_t * referenced_object,
+ drm_ref_t ref_action)
+{
+ drm_hash_item_t *hash;
+ int ret;
+
+ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
+ (unsigned long)referenced_object, &hash);
+ if (ret)
+ return NULL;
+
+ return drm_hash_entry(hash, drm_ref_object_t, hash);
+}
+
+static void drm_remove_other_references(drm_file_t * priv,
+ drm_user_object_t * ro)
+{
+ int i;
+ drm_open_hash_t *ht;
+ drm_hash_item_t *hash;
+ drm_ref_object_t *item;
+
+ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
+ ht = &priv->refd_object_hash[i];
+ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
+ item = drm_hash_entry(hash, drm_ref_object_t, hash);
+ drm_remove_ref_object(priv, item);
+ }
+ }
+}
+
+void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
+{
+ int ret;
+ drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key;
+ drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
+ drm_ref_t unref_action;
+
+ unref_action = item->unref_action;
+ if (atomic_dec_and_test(&item->refcount)) {
+ ret = drm_ht_remove_item(ht, &item->hash);
+ BUG_ON(ret);
+ list_del_init(&item->list);
+ if (unref_action == _DRM_REF_USE)
+ drm_remove_other_references(priv, user_object);
+ drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
+ }
+
+ switch (unref_action) {
+ case _DRM_REF_USE:
+ drm_deref_user_object(priv, user_object);
+ break;
+ default:
+ BUG_ON(!user_object->unref);
+ user_object->unref(priv, user_object, unref_action);
+ break;
+ }
+
+}
+
+int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type, drm_user_object_t ** object)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_user_object_t *uo;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, user_token);
+ if (!uo || (uo->type != type)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
+ if (ret)
+ goto out_err;
+ mutex_unlock(&dev->struct_mutex);
+ *object = uo;
+ DRM_ERROR("Referenced an object\n");
+ return 0;
+ out_err:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_user_object_t *uo;
+ drm_ref_object_t *ro;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, user_token);
+ if (!uo || (uo->type != type)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
+ if (!ro) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ drm_remove_ref_object(priv, ro);
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("Unreferenced an object\n");
+ return 0;
+ out_err:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
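
As a usage illustration (an editorial sketch, not part of the patch), the helper below shows how the user-object reference calls added above are meant to be used from an ioctl handler. The function name and its arguments are hypothetical; it only assumes the drm_user_object_ref()/drm_user_object_unref() entry points defined in this file and plain process context with a valid drm_file_t.

#include "drmP.h"

/*
 * Hypothetical helper: take and later drop a usage reference on a user
 * object identified by its user-space token. drm_user_object_ref() and
 * drm_user_object_unref() take dev->struct_mutex themselves.
 */
static int example_touch_user_object(drm_file_t *priv, uint32_t user_token,
                                     drm_object_type_t type)
{
        drm_user_object_t *uo;
        int ret;

        ret = drm_user_object_ref(priv, user_token, type, &uo);
        if (ret)
                return ret;

        /* ... inspect or validate the object here ... */

        return drm_user_object_unref(priv, user_token, type);
}
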
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 25bb5f33..6182141a 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -87,6 +87,12 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
return -ENOMEM;
}
+ if (drm_ht_create(&dev->object_hash, 12)) {
+ drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+ drm_ht_remove(&dev->map_hash);
+ return -ENOMEM;
+ }
+
/* the DRM has 6 counters */
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
@@ -127,6 +133,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
+ drm_fence_manager_init(dev);
return 0;
error_out_unreg:
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
new file mode 100644
index 00000000..df4c312c
--- /dev/null
+++ b/linux-core/drm_ttm.c
@@ -0,0 +1,802 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include <asm/tlbflush.h>
+
+typedef struct p_mm_entry {
+ struct list_head head;
+ struct mm_struct *mm;
+ atomic_t refcount;
+} p_mm_entry_t;
+
+typedef struct drm_val_action {
+ int needs_rx_flush;
+ int evicted_tt;
+ int evicted_vram;
+ int validated;
+} drm_val_action_t;
+
+/*
+ * We may be manipulating other processes' page tables, so for each TTM, keep track of
+ * which mm_structs are currently mapping the ttm so that we can take the appropriate
+ * locks when we modify their page tables. A typical application is when we evict another
+ * process' buffers.
+ */
+
+int drm_ttm_add_mm_to_list(drm_ttm_t * ttm, struct mm_struct *mm)
+{
+ p_mm_entry_t *entry, *n_entry;
+
+ list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ if (mm == entry->mm) {
+ atomic_inc(&entry->refcount);
+ return 0;
+ } else if ((unsigned long)mm < (unsigned long)entry->mm)
+ break;
+ }
+
+ n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+ if (!n_entry) {
+ DRM_ERROR("Allocation of process mm pointer entry failed\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&n_entry->head);
+ n_entry->mm = mm;
+ atomic_set(&n_entry->refcount, 0);
+ atomic_inc(&ttm->shared_count);
+ ttm->mm_list_seq++;
+
+ list_add_tail(&n_entry->head, &entry->head);
+
+ return 0;
+}
+
+void drm_ttm_delete_mm(drm_ttm_t * ttm, struct mm_struct *mm)
+{
+ p_mm_entry_t *entry, *n;
+ list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
+ if (mm == entry->mm) {
+ if (atomic_add_negative(-1, &entry->refcount)) {
+ list_del(&entry->head);
+ drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+ atomic_dec(&ttm->shared_count);
+ ttm->mm_list_seq++;
+ }
+ return;
+ }
+ }
+ BUG_ON(1);
+}
+
+static void drm_ttm_lock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
+{
+ p_mm_entry_t *entry;
+
+ list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ if (mm_sem) {
+ down_write(&entry->mm->mmap_sem);
+ }
+ if (page_table) {
+ spin_lock(&entry->mm->page_table_lock);
+ }
+ }
+}
+
+static void drm_ttm_unlock_mm(drm_ttm_t * ttm, int mm_sem, int page_table)
+{
+ p_mm_entry_t *entry;
+
+ list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ if (page_table) {
+ spin_unlock(&entry->mm->page_table_lock);
+ }
+ if (mm_sem) {
+ up_write(&entry->mm->mmap_sem);
+ }
+ }
+}
+
+static int ioremap_vmas(drm_ttm_t * ttm, unsigned long page_offset,
+ unsigned long num_pages, unsigned long aper_offset)
+{
+ struct list_head *list;
+ int ret = 0;
+
+ list_for_each(list, &ttm->vma_list->head) {
+ drm_ttm_vma_list_t *entry =
+ list_entry(list, drm_ttm_vma_list_t, head);
+
+ ret = io_remap_pfn_range(entry->vma,
+ entry->vma->vm_start +
+ (page_offset << PAGE_SHIFT),
+ (ttm->aperture_base >> PAGE_SHIFT) +
+ aper_offset, num_pages << PAGE_SHIFT,
+ drm_io_prot(_DRM_AGP, entry->vma));
+ if (ret)
+ break;
+ }
+ global_flush_tlb();
+ return ret;
+}
+
+/*
+ * Unmap all vma pages from vmas mapping this ttm.
+ */
+
+static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
+ unsigned long num_pages)
+{
+ struct list_head *list;
+ struct page **first_page = ttm->pages + page_offset;
+ struct page **last_page = ttm->pages + (page_offset + num_pages);
+ struct page **cur_page;
+#if !defined(flush_tlb_mm) && defined(MODULE)
+ int flush_tlb = 0;
+#endif
+ list_for_each(list, &ttm->vma_list->head) {
+ drm_ttm_vma_list_t *entry =
+ list_entry(list, drm_ttm_vma_list_t, head);
+ drm_clear_vma(entry->vma,
+ entry->vma->vm_start +
+ (page_offset << PAGE_SHIFT),
+ entry->vma->vm_start +
+ ((page_offset + num_pages) << PAGE_SHIFT));
+#if !defined(flush_tlb_mm) && defined(MODULE)
+ flush_tlb = 1;
+#endif
+ }
+#if !defined(flush_tlb_mm) && defined(MODULE)
+ if (flush_tlb)
+ global_flush_tlb();
+#endif
+
+ for (cur_page = first_page; cur_page != last_page; ++cur_page) {
+ if (page_mapcount(*cur_page) != 0) {
+ DRM_ERROR("Mapped page detected. Map count is %d\n",
+ page_mapcount(*cur_page));
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Free all resources associated with a ttm.
+ */
+
+int drm_destroy_ttm(drm_ttm_t * ttm)
+{
+
+ int i;
+ struct list_head *list, *next;
+ struct page **cur_page;
+
+ if (!ttm)
+ return 0;
+
+ if (atomic_read(&ttm->vma_count) > 0) {
+ ttm->destroy = 1;
+ DRM_DEBUG("VMAs are still alive. Skipping destruction.\n");
+ return -EBUSY;
+ }
+
+ if (ttm->be_list) {
+ list_for_each_safe(list, next, &ttm->be_list->head) {
+ drm_ttm_backend_list_t *entry =
+ list_entry(list, drm_ttm_backend_list_t, head);
+ drm_destroy_ttm_region(entry);
+ }
+
+ drm_free(ttm->be_list, sizeof(*ttm->be_list), DRM_MEM_TTM);
+ ttm->be_list = NULL;
+ }
+
+ if (ttm->pages) {
+ for (i = 0; i < ttm->num_pages; ++i) {
+ cur_page = ttm->pages + i;
+ if (ttm->page_flags &&
+ (ttm->page_flags[i] & DRM_TTM_PAGE_UNCACHED) &&
+ *cur_page && !PageHighMem(*cur_page)) {
+ change_page_attr(*cur_page, 1, PAGE_KERNEL);
+ }
+ if (*cur_page) {
+ ClearPageReserved(*cur_page);
+ __free_page(*cur_page);
+ }
+ }
+ global_flush_tlb();
+ vfree(ttm->pages);
+ ttm->pages = NULL;
+ }
+
+ if (ttm->page_flags) {
+ vfree(ttm->page_flags);
+ ttm->page_flags = NULL;
+ }
+
+ if (ttm->vma_list) {
+ list_for_each_safe(list, next, &ttm->vma_list->head) {
+ drm_ttm_vma_list_t *entry =
+ list_entry(list, drm_ttm_vma_list_t, head);
+ list_del(list);
+ entry->vma->vm_private_data = NULL;
+ drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+ }
+ drm_free(ttm->vma_list, sizeof(*ttm->vma_list), DRM_MEM_TTM);
+ ttm->vma_list = NULL;
+ }
+
+ drm_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
+
+ return 0;
+}
+
+/*
+ * Initialize a ttm.
+ * FIXME: Avoid using vmalloc for the page- and page_flags tables?
+ */
+
+static drm_ttm_t *drm_init_ttm(struct drm_device * dev, unsigned long size)
+{
+
+ drm_ttm_t *ttm;
+
+ if (!dev->driver->bo_driver)
+ return NULL;
+
+ ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
+ if (!ttm)
+ return NULL;
+
+ ttm->lhandle = 0;
+ atomic_set(&ttm->vma_count, 0);
+ ttm->destroy = 0;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ ttm->page_flags = vmalloc(ttm->num_pages * sizeof(*ttm->page_flags));
+ if (!ttm->page_flags) {
+ drm_destroy_ttm(ttm);
+ DRM_ERROR("Failed allocating page_flags table\n");
+ return NULL;
+ }
+ memset(ttm->page_flags, 0, ttm->num_pages * sizeof(*ttm->page_flags));
+
+ ttm->pages = vmalloc(ttm->num_pages * sizeof(*ttm->pages));
+ if (!ttm->pages) {
+ drm_destroy_ttm(ttm);
+ DRM_ERROR("Failed allocating page table\n");
+ return NULL;
+ }
+ memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
+
+ ttm->be_list = drm_calloc(1, sizeof(*ttm->be_list), DRM_MEM_TTM);
+ if (!ttm->be_list) {
+ DRM_ERROR("Alloc be regions failed\n");
+ drm_destroy_ttm(ttm);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&ttm->be_list->head);
+ INIT_LIST_HEAD(&ttm->p_mm_list);
+ atomic_set(&ttm->shared_count, 0);
+ ttm->mm_list_seq = 0;
+
+ ttm->vma_list = drm_calloc(1, sizeof(*ttm->vma_list), DRM_MEM_TTM);
+ if (!ttm->vma_list) {
+ DRM_ERROR("Alloc vma list failed\n");
+ drm_destroy_ttm(ttm);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&ttm->vma_list->head);
+
+ ttm->lhandle = (unsigned long)ttm;
+ ttm->dev = dev;
+
+ return ttm;
+}
+
+/*
+ * Lock the mmap_sems for processes that are mapping this ttm.
+ * This looks a bit clumsy, since we need to maintain the correct
+ * locking order
+ * mm->mmap_sem
+ * dev->struct_mutex
+ * and while we release dev->struct_mutex to lock the mmap_sems,
+ * the mmap_sem list may have been updated. We need to revalidate
+ * it after relocking dev->struct_mutex.
+ */
+
+static int drm_ttm_lock_mmap_sem(drm_ttm_t * ttm)
+{
+ struct mm_struct **mm_list = NULL, **mm_list_p;
+ uint32_t list_seq;
+ uint32_t cur_count, shared_count;
+ p_mm_entry_t *entry;
+ unsigned i;
+
+ cur_count = 0;
+ list_seq = ttm->mm_list_seq;
+ shared_count = atomic_read(&ttm->shared_count);
+
+ do {
+ if (shared_count > cur_count) {
+ if (mm_list)
+ drm_free(mm_list, sizeof(*mm_list) * cur_count,
+ DRM_MEM_TTM);
+ cur_count = shared_count + 10;
+ mm_list =
+ drm_alloc(sizeof(*mm_list) * cur_count, DRM_MEM_TTM);
+ if (!mm_list)
+ return -ENOMEM;
+ }
+
+ mm_list_p = mm_list;
+ list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ *mm_list_p++ = entry->mm;
+ }
+
+ mutex_unlock(&ttm->dev->struct_mutex);
+ mm_list_p = mm_list;
+ for (i = 0; i < shared_count; ++i, ++mm_list_p) {
+ down_write(&((*mm_list_p)->mmap_sem));
+ }
+
+ mutex_lock(&ttm->dev->struct_mutex);
+
+ if (list_seq != ttm->mm_list_seq) {
+ mm_list_p = mm_list;
+ for (i = 0; i < shared_count; ++i, ++mm_list_p) {
+ up_write(&((*mm_list_p)->mmap_sem));
+ }
+
+ }
+ shared_count = atomic_read(&ttm->shared_count);
+
+ } while (list_seq != ttm->mm_list_seq);
+
+ if (mm_list)
+ drm_free(mm_list, sizeof(*mm_list) * cur_count, DRM_MEM_TTM);
+
+ return 0;
+}
+
+/*
+ * Change caching policy for range of pages in a ttm.
+ */
+
+static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
+ unsigned long num_pages, int noncached,
+ int do_tlbflush)
+{
+ int i, cur;
+ struct page **cur_page;
+ pgprot_t attr = (noncached) ? PAGE_KERNEL_NOCACHE : PAGE_KERNEL;
+
+ drm_ttm_lock_mm(ttm, 0, 1);
+ unmap_vma_pages(ttm, page_offset, num_pages);
+ drm_ttm_unlock_mm(ttm, 0, 1);
+
+ for (i = 0; i < num_pages; ++i) {
+ cur = page_offset + i;
+ cur_page = ttm->pages + cur;
+ if (*cur_page) {
+ if (PageHighMem(*cur_page)) {
+ if (noncached
+ && page_address(*cur_page) != NULL) {
+ DRM_ERROR
+ ("Illegal mapped HighMem Page\n");
+ return -EINVAL;
+ }
+ } else if ((ttm->page_flags[cur] &
+ DRM_TTM_PAGE_UNCACHED) != noncached) {
+ DRM_MASK_VAL(ttm->page_flags[cur],
+ DRM_TTM_PAGE_UNCACHED, noncached);
+ change_page_attr(*cur_page, 1, attr);
+ }
+ }
+ }
+ if (do_tlbflush)
+ global_flush_tlb();
+ return 0;
+}
+
+/*
+ * Unbind a ttm region from the aperture.
+ */
+
+int drm_evict_ttm_region(drm_ttm_backend_list_t * entry)
+{
+ drm_ttm_backend_t *be = entry->be;
+ drm_ttm_t *ttm = entry->owner;
+ int ret;
+
+ if (be) {
+ switch (entry->state) {
+ case ttm_bound:
+ if (ttm && be->needs_cache_adjust(be)) {
+ ret = drm_ttm_lock_mmap_sem(ttm);
+ if (ret)
+ return ret;
+ drm_ttm_lock_mm(ttm, 0, 1);
+ unmap_vma_pages(ttm, entry->page_offset,
+ entry->num_pages);
+ global_flush_tlb();
+ drm_ttm_unlock_mm(ttm, 0, 1);
+ }
+ be->unbind(entry->be);
+ if (ttm && be->needs_cache_adjust(be)) {
+ drm_set_caching(ttm, entry->page_offset,
+ entry->num_pages, 0, 1);
+ drm_ttm_unlock_mm(ttm, 1, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ entry->state = ttm_evicted;
+ return 0;
+}
+
+void drm_unbind_ttm_region(drm_ttm_backend_list_t * entry)
+{
+ drm_evict_ttm_region(entry);
+ entry->state = ttm_unbound;
+}
+
+/*
+ * Destroy and clean up all resources associated with a ttm region.
+ * FIXME: release pages to OS when doing this operation.
+ */
+
+void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
+{
+ drm_ttm_backend_t *be = entry->be;
+ drm_ttm_t *ttm = entry->owner;
+ uint32_t *cur_page_flags;
+ int i;
+
+ list_del_init(&entry->head);
+
+ drm_unbind_ttm_region(entry);
+ if (be) {
+ be->clear(entry->be);
+ if (be->needs_cache_adjust(be)) {
+ int ret = drm_ttm_lock_mmap_sem(ttm);
+ drm_set_caching(ttm, entry->page_offset,
+ entry->num_pages, 0, 1);
+ if (!ret)
+ drm_ttm_unlock_mm(ttm, 1, 0);
+ }
+ be->destroy(be);
+ }
+ cur_page_flags = ttm->page_flags + entry->page_offset;
+ for (i = 0; i < entry->num_pages; ++i) {
+ DRM_MASK_VAL(*cur_page_flags, DRM_TTM_PAGE_USED, 0);
+ cur_page_flags++;
+ }
+
+ drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+}
+
+/*
+ * Create a ttm region from a range of ttm pages.
+ */
+
+int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
+ unsigned long n_pages, int cached,
+ drm_ttm_backend_list_t ** region)
+{
+ struct page **cur_page;
+ uint32_t *cur_page_flags;
+ drm_ttm_backend_list_t *entry;
+ drm_ttm_backend_t *be;
+ int ret, i;
+
+ if ((page_offset + n_pages) > ttm->num_pages || n_pages == 0) {
+ DRM_ERROR("Region Doesn't fit ttm\n");
+ return -EINVAL;
+ }
+
+ cur_page_flags = ttm->page_flags + page_offset;
+ for (i = 0; i < n_pages; ++i, ++cur_page_flags) {
+ if (*cur_page_flags & DRM_TTM_PAGE_USED) {
+ DRM_ERROR("TTM region overlap\n");
+ return -EINVAL;
+ } else {
+ DRM_MASK_VAL(*cur_page_flags, DRM_TTM_PAGE_USED,
+ DRM_TTM_PAGE_USED);
+ }
+ }
+
+ entry = drm_calloc(1, sizeof(*entry), DRM_MEM_TTM);
+ if (!entry)
+ return -ENOMEM;
+
+ be = ttm->dev->driver->bo_driver->create_ttm_backend_entry(ttm->dev, cached);
+ if (!be) {
+ drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+ DRM_ERROR("Couldn't create backend.\n");
+ return -EINVAL;
+ }
+ entry->state = ttm_unbound;
+ entry->page_offset = page_offset;
+ entry->num_pages = n_pages;
+ entry->be = be;
+ entry->owner = ttm;
+
+ INIT_LIST_HEAD(&entry->head);
+ list_add_tail(&entry->head, &ttm->be_list->head);
+
+ for (i = 0; i < entry->num_pages; ++i) {
+ cur_page = ttm->pages + (page_offset + i);
+ if (!*cur_page) {
+ *cur_page = alloc_page(GFP_KERNEL);
+ if (!*cur_page) {
+ DRM_ERROR("Page allocation failed\n");
+ drm_destroy_ttm_region(entry);
+ return -ENOMEM;
+ }
+ SetPageReserved(*cur_page);
+ }
+ }
+
+ if ((ret = be->populate(be, n_pages, ttm->pages + page_offset))) {
+ drm_destroy_ttm_region(entry);
+ DRM_ERROR("Couldn't populate backend.\n");
+ return ret;
+ }
+ ttm->aperture_base = be->aperture_base;
+
+ *region = entry;
+ return 0;
+}
+
+/*
+ * Bind a ttm region. Set correct caching policy.
+ */
+
+int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
+ unsigned long aper_offset)
+{
+
+ int i;
+ uint32_t *cur_page_flag;
+ int ret = 0;
+ drm_ttm_backend_t *be;
+ drm_ttm_t *ttm;
+
+ if (!region || region->state == ttm_bound)
+ return -EINVAL;
+
+ be = region->be;
+ ttm = region->owner;
+
+ if (ttm && be->needs_cache_adjust(be)) {
+ ret = drm_ttm_lock_mmap_sem(ttm);
+ if (ret)
+ return ret;
+ drm_set_caching(ttm, region->page_offset, region->num_pages,
+ DRM_TTM_PAGE_UNCACHED, 1);
+ } else {
+ DRM_DEBUG("Binding cached\n");
+ }
+
+ if ((ret = be->bind(be, aper_offset))) {
+ if (ttm && be->needs_cache_adjust(be))
+ drm_ttm_unlock_mm(ttm, 1, 0);
+ drm_unbind_ttm_region(region);
+ DRM_ERROR("Couldn't bind backend.\n");
+ return ret;
+ }
+
+ cur_page_flag = ttm->page_flags + region->page_offset;
+ for (i = 0; i < region->num_pages; ++i) {
+ DRM_MASK_VAL(*cur_page_flag, DRM_TTM_MASK_PFN,
+ (i + aper_offset) << PAGE_SHIFT);
+ cur_page_flag++;
+ }
+
+ if (ttm && be->needs_cache_adjust(be)) {
+ ioremap_vmas(ttm, region->page_offset, region->num_pages,
+ aper_offset);
+ drm_ttm_unlock_mm(ttm, 1, 0);
+ }
+
+ region->state = ttm_bound;
+ return 0;
+}
+
+int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
+ unsigned long aper_offset)
+{
+ return drm_bind_ttm_region(entry, aper_offset);
+
+}
+
+/*
+ * Destroy an anonymous ttm region.
+ */
+
+void drm_user_destroy_region(drm_ttm_backend_list_t * entry)
+{
+ drm_ttm_backend_t *be;
+ struct page **cur_page;
+ int i;
+
+ if (!entry || entry->owner)
+ return;
+
+ be = entry->be;
+ if (!be) {
+ drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+ return;
+ }
+
+ be->unbind(be);
+
+ if (entry->anon_pages) {
+ cur_page = entry->anon_pages;
+ for (i = 0; i < entry->anon_locked; ++i) {
+ if (!PageReserved(*cur_page))
+ SetPageDirty(*cur_page);
+ page_cache_release(*cur_page);
+ cur_page++;
+ }
+ vfree(entry->anon_pages);
+ }
+
+ be->destroy(be);
+ drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+ return;
+}
+
+/*
+ * Create a ttm region from an arbitrary region of user pages.
+ * Since this region has no backing ttm, its owner is set to
+ * NULL, and it is registered with the file of the caller.
+ * It is destroyed when the file is closed. We call this an
+ * anonymous ttm region.
+ */
+
+int drm_user_create_region(drm_device_t * dev, unsigned long start, int len,
+ drm_ttm_backend_list_t ** entry)
+{
+ drm_ttm_backend_list_t *tmp;
+ drm_ttm_backend_t *be;
+ int ret;
+
+ if (len <= 0)
+ return -EINVAL;
+ if (!dev->driver->bo_driver->create_ttm_backend_entry)
+ return -EFAULT;
+
+ tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_TTM);
+
+ if (!tmp)
+ return -ENOMEM;
+
+ be = dev->driver->bo_driver->create_ttm_backend_entry(dev, 1);
+ tmp->be = be;
+
+ if (!be) {
+ drm_user_destroy_region(tmp);
+ return -ENOMEM;
+ }
+ if (be->needs_cache_adjust(be)) {
+ drm_user_destroy_region(tmp);
+ return -EFAULT;
+ }
+
+ tmp->anon_pages = vmalloc(sizeof(*(tmp->anon_pages)) * len);
+
+ if (!tmp->anon_pages) {
+ drm_user_destroy_region(tmp);
+ return -ENOMEM;
+ }
+
+ down_read(&current->mm->mmap_sem);
+ ret = get_user_pages(current, current->mm, start, len, 1, 0,
+ tmp->anon_pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (ret != len) {
+ drm_user_destroy_region(tmp);
+ DRM_ERROR("Could not lock %d pages. Return code was %d\n",
+ len, ret);
+ return -EPERM;
+ }
+ tmp->anon_locked = len;
+
+ ret = be->populate(be, len, tmp->anon_pages);
+
+ if (ret) {
+ drm_user_destroy_region(tmp);
+ return ret;
+ }
+
+ tmp->state = ttm_unbound;
+ *entry = tmp;
+
+ return 0;
+}
+
+/*
+ * Create a ttm and add it to the drm book-keeping.
+ */
+
+int drm_add_ttm(drm_device_t * dev, unsigned size, drm_map_list_t ** maplist)
+{
+ drm_map_list_t *list;
+ drm_map_t *map;
+ drm_ttm_t *ttm;
+
+ map = drm_alloc(sizeof(*map), DRM_MEM_TTM);
+ if (!map)
+ return -ENOMEM;
+
+ ttm = drm_init_ttm(dev, size);
+
+ if (!ttm) {
+ DRM_ERROR("Could not create ttm\n");
+ drm_free(map, sizeof(*map), DRM_MEM_TTM);
+ return -ENOMEM;
+ }
+
+ map->offset = ttm->lhandle;
+ map->type = _DRM_TTM;
+ map->flags = _DRM_REMOVABLE;
+ map->size = size;
+
+ list = drm_calloc(1, sizeof(*list), DRM_MEM_TTM);
+ if (!list) {
+ drm_destroy_ttm(ttm);
+ drm_free(map, sizeof(*map), DRM_MEM_TTM);
+ return -ENOMEM;
+ }
+ map->handle = (void *)list;
+
+ if (drm_ht_just_insert_please(&dev->map_hash, &list->hash,
+ (unsigned long) map->handle,
+ 32 - PAGE_SHIFT - 3, PAGE_SHIFT,
+ DRM_MAP_HASH_OFFSET)) {
+ drm_destroy_ttm(ttm);
+ drm_free(map, sizeof(*map), DRM_MEM_TTM);
+ drm_free(list, sizeof(*list), DRM_MEM_TTM);
+ return -ENOMEM;
+ }
+
+ list->user_token =
+ (list->hash.key << PAGE_SHIFT) + DRM_MAP_HASH_OFFSET;
+ list->map = map;
+
+ *maplist = list;
+
+ return 0;
+}
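
As a usage illustration (an editorial sketch, not part of the patch), the following shows the intended life cycle of a ttm region using the functions above. The helper name is hypothetical; error handling is minimal, and the caller is assumed to hold dev->struct_mutex, which drm_ttm_lock_mmap_sem() temporarily drops and re-takes.

#include "drmP.h"

/*
 * Hypothetical helper: bind the first n_pages of a ttm at aperture page
 * offset aper_offset using the region API from drm_ttm.c.
 */
static int example_bind_ttm_pages(drm_ttm_t *ttm, unsigned long n_pages,
                                  unsigned long aper_offset)
{
        drm_ttm_backend_list_t *region;
        int ret;

        /* Reserve pages [0, n_pages) of the ttm and create a backend for them. */
        ret = drm_create_ttm_region(ttm, 0, n_pages, 0 /* uncached */, &region);
        if (ret)
                return ret;

        /*
         * Bind the region into the GTT; this also switches the pages to a
         * noncached mapping when the backend requires it.
         */
        ret = drm_bind_ttm_region(region, aper_offset);
        if (ret) {
                drm_destroy_ttm_region(region);
                return ret;
        }

        /*
         * Later: drm_evict_ttm_region(region) takes it out of the aperture,
         * drm_rebind_ttm_region(region, new_offset) puts it back, and
         * drm_destroy_ttm_region(region) finally releases it.
         */
        return 0;
}
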
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
new file mode 100644
index 00000000..ea9a8372
--- /dev/null
+++ b/linux-core/drm_ttm.h
@@ -0,0 +1,167 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_TTM_H
+#define _DRM_TTM_H
+#define DRM_HAS_TTM
+
+/*
+ * The backend GART interface (in our case AGP). Any similar type of device (PCIe?)
+ * needs only to implement these functions to be usable with the TTM interface.
+ * The AGP backend implementation lives in drm_agpsupport.c and basically maps
+ * these calls to the available agpgart functions. Each drm device driver gets an
+ * additional function pointer that creates these types, so that the device can
+ * choose the correct aperture (multiple AGP apertures, etc.).
+ * Most device drivers will let this point to the standard AGP implementation.
+
+typedef struct drm_ttm_backend {
+ unsigned long aperture_base;
+ void *private;
+ int (*needs_cache_adjust) (struct drm_ttm_backend * backend);
+ int (*populate) (struct drm_ttm_backend * backend,
+ unsigned long num_pages, struct page ** pages);
+ void (*clear) (struct drm_ttm_backend * backend);
+ int (*bind) (struct drm_ttm_backend * backend, unsigned long offset);
+ int (*unbind) (struct drm_ttm_backend * backend);
+ void (*destroy) (struct drm_ttm_backend * backend);
+} drm_ttm_backend_t;
+
+#define DRM_FLUSH_READ (0x01)
+#define DRM_FLUSH_WRITE (0x02)
+#define DRM_FLUSH_EXE (0x04)
+
+typedef struct drm_ttm_backend_list {
+ uint32_t flags;
+ struct list_head head;
+ drm_ttm_backend_t *be;
+ unsigned page_offset;
+ unsigned num_pages;
+ struct drm_ttm *owner;
+ drm_file_t *anon_owner;
+ struct page **anon_pages;
+ int anon_locked;
+ enum {
+ ttm_bound,
+ ttm_evicted,
+ ttm_unbound
+ } state;
+} drm_ttm_backend_list_t;
+
+typedef struct drm_ttm_vma_list {
+ struct list_head head;
+ pgprot_t orig_protection;
+ struct vm_area_struct *vma;
+ drm_map_t *map;
+} drm_ttm_vma_list_t;
+
+typedef struct drm_ttm {
+ struct list_head p_mm_list;
+ atomic_t shared_count;
+ uint32_t mm_list_seq;
+ unsigned long aperture_base;
+ struct page **pages;
+ uint32_t *page_flags;
+ unsigned long lhandle;
+ unsigned long num_pages;
+ drm_ttm_vma_list_t *vma_list;
+ struct drm_device *dev;
+ drm_ttm_backend_list_t *be_list;
+ atomic_t vma_count;
+ int mmap_sem_locked;
+ int destroy;
+} drm_ttm_t;
+
+int drm_add_ttm(struct drm_device * dev, unsigned size, drm_map_list_t ** maplist);
+
+/*
+ * Bind a part of the ttm, starting at page_offset and of size n_pages, into the GTT
+ * at aperture offset aper_offset. The region handle will be used to reference this
+ * bound region in the future. Note that the region may be the whole ttm.
+ * Regions should not overlap.
+ * This function sets all affected pages as noncacheable and flushes caches and the TLB.
+ */
+
+int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
+ unsigned long n_pages, int cached,
+ drm_ttm_backend_list_t ** region);
+
+int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
+ unsigned long aper_offset);
+
+/*
+ * Unbind a ttm region. Restores caching policy. Flushes caches and TLB.
+ */
+
+void drm_unbind_ttm_region(drm_ttm_backend_list_t * entry);
+void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry);
+
+/*
+ * Evict a ttm region. Keeps Aperture caching policy.
+ */
+
+int drm_evict_ttm_region(drm_ttm_backend_list_t * entry);
+
+/*
+ * Rebind an already evicted region into a possibly new location in the aperture.
+ */
+
+int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
+ unsigned long aper_offset);
+
+/*
+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
+ * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
+ * when the last vma exits.
+ */
+
+extern int drm_destroy_ttm(drm_ttm_t * ttm);
+extern void drm_user_destroy_region(drm_ttm_backend_list_t * entry);
+extern int drm_ttm_add_mm_to_list(drm_ttm_t * ttm, struct mm_struct *mm);
+extern void drm_ttm_delete_mm(drm_ttm_t * ttm, struct mm_struct *mm);
+
+#define DRM_MASK_VAL(dest, mask, val) \
+ (dest) = ((dest) & ~(mask)) | ((val) & (mask));
+
+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
+
+/*
+ * Page flags.
+ */
+
+#define DRM_TTM_PAGE_UNCACHED 0x01
+#define DRM_TTM_PAGE_USED 0x02
+#define DRM_TTM_PAGE_BOUND 0x04
+#define DRM_TTM_PAGE_PRESENT 0x08
+
+#endif
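
To make the backend interface above concrete, here is an editorial sketch (not part of the patch) of a minimal drm_ttm_backend_t implementation. Every function and the example_ prefix are hypothetical; a real backend (such as the AGP one in drm_agpsupport.c) would program actual GART hardware where the comments indicate.

#include "drmP.h"

static int example_needs_cache_adjust(drm_ttm_backend_t *backend)
{
        return 1;               /* Pages must be mapped uncached while bound. */
}

static int example_populate(drm_ttm_backend_t *backend,
                            unsigned long num_pages, struct page **pages)
{
        backend->private = pages;       /* Remember the page array. */
        return 0;
}

static void example_clear(drm_ttm_backend_t *backend)
{
        backend->private = NULL;
}

static int example_bind(drm_ttm_backend_t *backend, unsigned long offset)
{
        /* Program the GART so that the populated pages appear at 'offset'. */
        return 0;
}

static int example_unbind(drm_ttm_backend_t *backend)
{
        /* Remove the pages from the GART again. */
        return 0;
}

static void example_destroy(drm_ttm_backend_t *backend)
{
        drm_free(backend, sizeof(*backend), DRM_MEM_TTM);
}

drm_ttm_backend_t *example_create_ttm_backend_entry(drm_device_t *dev, int cached)
{
        drm_ttm_backend_t *be = drm_calloc(1, sizeof(*be), DRM_MEM_TTM);

        if (!be)
                return NULL;
        be->aperture_base = 0;  /* Physical base of the aperture. */
        be->needs_cache_adjust = example_needs_cache_adjust;
        be->populate = example_populate;
        be->clear = example_clear;
        be->bind = example_bind;
        be->unbind = example_unbind;
        be->destroy = example_destroy;
        return be;
}
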
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index cf3bc3cf..85b39490 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -34,12 +34,42 @@
*/
#include "drmP.h"
+
#if defined(__ia64__)
#include <linux/efi.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
+static void drm_vm_ttm_close(struct vm_area_struct *vma);
+static int drm_vm_ttm_open(struct vm_area_struct *vma);
+static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);
+
+
+pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+ pgprot_t tmp = drm_prot_map(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+ pgprot_val(tmp) |= _PAGE_PCD;
+ pgprot_val(tmp) &= ~_PAGE_PWT;
+ }
+#elif defined(__powerpc__)
+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
+ if (map_type == _DRM_REGISTERS)
+ pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+ if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+ vma->vm_start))
+ tmp = pgprot_writecombine(tmp);
+ else
+ tmp = pgprot_noncached(tmp);
+#endif
+ return tmp;
+}
+
/**
* \c nopage method for AGP virtual memory.
@@ -129,6 +159,131 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
+
+static int drm_ttm_remap_bound_pfn(struct vm_area_struct *vma,
+ unsigned long address,
+ unsigned long size)
+{
+ unsigned long
+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long
+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
+ vma->vm_private_data;
+ drm_map_t *map = entry->map;
+ drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+ unsigned long i, cur_pfn;
+ unsigned long start = 0;
+ unsigned long end = 0;
+ unsigned long last_pfn = 0;
+ unsigned long start_pfn = 0;
+ int bound_sequence = FALSE;
+ int ret = 0;
+ uint32_t cur_flags;
+
+ for (i=page_offset; i<page_offset + num_pages; ++i) {
+ cur_flags = ttm->page_flags[i];
+
+ if (!bound_sequence && (cur_flags & DRM_TTM_PAGE_UNCACHED)) {
+
+ start = i;
+ end = i;
+ last_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
+ start_pfn = last_pfn;
+ bound_sequence = TRUE;
+
+ } else if (bound_sequence) {
+
+ cur_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
+
+ if ( !(cur_flags & DRM_TTM_PAGE_UNCACHED) ||
+ (cur_pfn != last_pfn + 1)) {
+
+ ret = io_remap_pfn_range(vma,
+ vma->vm_start + (start << PAGE_SHIFT),
+ (ttm->aperture_base >> PAGE_SHIFT)
+ + start_pfn,
+ (end - start + 1) << PAGE_SHIFT,
+ drm_io_prot(_DRM_AGP, vma));
+
+ if (ret)
+ break;
+
+ bound_sequence = (cur_flags & DRM_TTM_PAGE_UNCACHED);
+ if (!bound_sequence)
+ continue;
+
+ start = i;
+ end = i;
+ last_pfn = cur_pfn;
+ start_pfn = last_pfn;
+
+ } else {
+
+ end++;
+ last_pfn = cur_pfn;
+
+ }
+ }
+ }
+
+ if (!ret && bound_sequence) {
+ ret = io_remap_pfn_range(vma,
+ vma->vm_start + (start << PAGE_SHIFT),
+ (ttm->aperture_base >> PAGE_SHIFT)
+ + start_pfn,
+ (end - start + 1) << PAGE_SHIFT,
+ drm_io_prot(_DRM_AGP, vma));
+ }
+
+ if (ret) {
+ DRM_ERROR("Map returned %c\n", ret);
+ }
+ return ret;
+}
+
+static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
+ unsigned long address)
+{
+ drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
+ vma->vm_private_data;
+ drm_map_t *map;
+ unsigned long page_offset;
+ struct page *page;
+ drm_ttm_t *ttm;
+ pgprot_t default_prot;
+ uint32_t page_flags;
+
+ if (address > vma->vm_end)
+ return NOPAGE_SIGBUS; /* Disallow mremap */
+ if (!entry)
+ return NOPAGE_OOM; /* Nothing allocated */
+
+ map = (drm_map_t *) entry->map;
+ ttm = (drm_ttm_t *) map->offset;
+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+ page = ttm->pages[page_offset];
+
+ page_flags = ttm->page_flags[page_offset];
+
+ if (!page) {
+ page = ttm->pages[page_offset] = alloc_page(GFP_KERNEL);
+ if (!page)
+ return NOPAGE_OOM;
+ SetPageReserved(page);
+ }
+
+ get_page(page);
+
+ default_prot = drm_prot_map(vma->vm_flags);
+
+ BUG_ON(page_flags & DRM_TTM_PAGE_UNCACHED);
+ vma->vm_page_prot = default_prot;
+
+ return page;
+}
+
/**
* \c nopage method for shared virtual memory.
*
@@ -243,6 +398,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
+ case _DRM_TTM:
+ BUG_ON(1);
+ break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
@@ -358,6 +516,15 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
return drm_do_vm_sg_nopage(vma, address);
}
+static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ if (type)
+ *type = VM_FAULT_MINOR;
+ return drm_do_vm_ttm_nopage(vma, address);
+}
+
+
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
@@ -384,6 +551,13 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
return drm_do_vm_sg_nopage(vma, address);
}
+static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+ unsigned long address, int unused)
+{
+ return drm_do_vm_ttm_nopage(vma, address);
+}
+
+
#endif
/** AGP virtual memory operations */
@@ -414,6 +588,12 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
+static struct vm_operations_struct drm_vm_ttm_ops = {
+ .nopage = drm_vm_ttm_nopage,
+ .open = drm_vm_ttm_open_wrapper,
+ .close = drm_vm_ttm_close,
+};
+
/**
* \c open method for shared virtual memory.
*
@@ -443,6 +623,46 @@ static void drm_vm_open(struct vm_area_struct *vma)
}
}
+static int drm_vm_ttm_open(struct vm_area_struct *vma) {
+
+ drm_ttm_vma_list_t *entry, *tmp_vma =
+ (drm_ttm_vma_list_t *) vma->vm_private_data;
+ drm_map_t *map;
+ drm_ttm_t *ttm;
+ drm_file_t *priv = vma->vm_file->private_data;
+ drm_device_t *dev = priv->head->dev;
+ int ret = 0;
+
+ drm_vm_open(vma);
+ mutex_lock(&dev->struct_mutex);
+ entry = drm_calloc(1, sizeof(*entry), DRM_MEM_VMAS);
+ if (entry) {
+ *entry = *tmp_vma;
+ map = (drm_map_t *) entry->map;
+ ttm = (drm_ttm_t *) map->offset;
+ ret = drm_ttm_add_mm_to_list(ttm, vma->vm_mm);
+ if (!ret) {
+ atomic_inc(&ttm->vma_count);
+ INIT_LIST_HEAD(&entry->head);
+ entry->vma = vma;
+ entry->orig_protection = vma->vm_page_prot;
+ list_add_tail(&entry->head, &ttm->vma_list->head);
+ vma->vm_private_data = (void *) entry;
+ DRM_DEBUG("Added VMA to ttm at 0x%016lx\n",
+ (unsigned long) ttm);
+ }
+ } else {
+ ret = -ENOMEM;
+ }
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
+{
+ drm_vm_ttm_open(vma);
+}
+
/**
* \c close method for all virtual memory types.
*
@@ -476,6 +696,39 @@ static void drm_vm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
+
+static void drm_vm_ttm_close(struct vm_area_struct *vma)
+{
+ drm_ttm_vma_list_t *ttm_vma =
+ (drm_ttm_vma_list_t *) vma->vm_private_data;
+ drm_map_t *map;
+ drm_ttm_t *ttm;
+ int found_maps;
+ struct list_head *list;
+ drm_device_t *dev;
+ int ret;
+
+ drm_vm_close(vma);
+ if (ttm_vma) {
+ map = (drm_map_t *) ttm_vma->map;
+ ttm = (drm_ttm_t *) map->offset;
+ dev = ttm->dev;
+ mutex_lock(&dev->struct_mutex);
+ drm_ttm_delete_mm(ttm, vma->vm_mm);
+ drm_free(ttm_vma, sizeof(*ttm_vma), DRM_MEM_VMAS);
+ if (atomic_dec_and_test(&ttm->vma_count)) {
+ if (ttm->destroy) {
+ ret = drm_destroy_ttm(ttm);
+ BUG_ON(ret);
+ drm_free(map, sizeof(*map), DRM_MEM_TTM);
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+ return;
+}
+
+
/**
* mmap DMA memory.
*
@@ -620,27 +873,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
/* fall through to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
-#if defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
- pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
- }
-#elif defined(__powerpc__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
- if (map->type == _DRM_REGISTERS)
- pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
-#endif
- vma->vm_flags |= VM_IO; /* not in core dump */
-#if defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end -
- vma->vm_start))
- vma->vm_page_prot =
- pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot =
- pgprot_noncached(vma->vm_page_prot);
-#endif
offset = dev->driver->get_reg_ofs(dev);
+ vma->vm_flags |= VM_IO; /* not in core dump */
+ vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >>PAGE_SHIFT,
@@ -687,6 +922,22 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED;
#endif
break;
+ case _DRM_TTM: {
+ drm_ttm_vma_list_t tmp_vma;
+ tmp_vma.orig_protection = vma->vm_page_prot;
+ tmp_vma.map = map;
+ vma->vm_ops = &drm_vm_ttm_ops;
+ vma->vm_private_data = (void *) &tmp_vma;
+ vma->vm_file = filp;
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+ if (drm_ttm_remap_bound_pfn(vma,
+ vma->vm_start,
+ vma->vm_end - vma->vm_start))
+ return -EAGAIN;
+ if (drm_vm_ttm_open(vma))
+ return -EAGAIN;
+ return 0;
+ }
default:
return -EINVAL; /* This should never happen. */
}
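
As an editorial aside (not part of the patch), the page_flags encoding relied on by drm_ttm_remap_bound_pfn() above can be summarized by the hypothetical helper below. It makes the same assumption as that function: a page marked DRM_TTM_PAGE_UNCACHED is bound and carries its aperture page offset in the DRM_TTM_MASK_PFN bits, as written by drm_bind_ttm_region().

#include "drmP.h"

/* Hypothetical helper: derive the pfn for io_remap_pfn_range() from a bound
 * ttm page, mirroring the arithmetic in drm_ttm_remap_bound_pfn(). */
static unsigned long example_ttm_bound_pfn(drm_ttm_t *ttm, unsigned long page)
{
        uint32_t flags = ttm->page_flags[page];

        BUG_ON(!(flags & DRM_TTM_PAGE_UNCACHED));       /* Must be bound/uncached. */
        return (ttm->aperture_base >> PAGE_SHIFT) +
               ((flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT);
}
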
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
new file mode 100644
index 00000000..bedbd41c
--- /dev/null
+++ b/linux-core/i915_buffer.c
@@ -0,0 +1,40 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev, int cached)
+{
+ if (cached)
+ return drm_agp_init_ttm_cached(dev);
+ else
+ return drm_agp_init_ttm_uncached(dev);
+}
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index c6e25f9b..64ab3f50 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -38,6 +38,22 @@ static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
+static drm_fence_driver_t i915_fence_driver = {
+ .no_types = 2,
+ .wrap_diff = (1 << 30),
+ .flush_diff = (1 << 29),
+ .sequence_mask = 0xffffffffU,
+ .lazy_capable = 1,
+ .emit = i915_fence_emit_sequence,
+ .poke_flush = i915_poke_flush,
+};
+
+static drm_bo_driver_t i915_bo_driver = {
+ .cached_pages = 1,
+ .create_ttm_backend_entry = i915_create_ttm_backend_entry
+};
+
+
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
@@ -78,6 +94,9 @@ static struct drm_driver driver = {
.remove = __devexit_p(drm_cleanup_pci),
},
+ .fence_driver = &i915_fence_driver,
+ .bo_driver = &i915_bo_driver,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
new file mode 100644
index 00000000..46a2a728
--- /dev/null
+++ b/linux-core/i915_fence.c
@@ -0,0 +1,121 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * Implements an Intel sync flush operation.
+ */
+
+static void i915_perform_flush(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ int flush_completed = 0;
+ uint32_t flush_flags = 0;
+ uint32_t flush_sequence = 0;
+ uint32_t i_status;
+ uint32_t diff;
+ uint32_t sequence;
+
+ if (fm->pending_exe_flush) {
+ sequence = READ_BREADCRUMB(dev_priv);
+ diff = sequence - fm->last_exe_flush;
+ if (diff < driver->wrap_diff && diff != 0) {
+ drm_fence_handler(dev, sequence, DRM_FENCE_EXE);
+ diff = sequence - fm->exe_flush_sequence;
+ if (diff < driver->wrap_diff) {
+ fm->pending_exe_flush = 0;
+ /*
+ * Turn off user IRQs
+ */
+ } else {
+ /*
+ * Turn on user IRQs
+ */
+ }
+ }
+ }
+ if (dev_priv->flush_pending) {
+ i_status = READ_HWSP(dev_priv, 0);
+ if ((i_status & (1 << 12)) !=
+ (dev_priv->saved_flush_status & (1 << 12))) {
+ flush_completed = 1;
+ flush_flags = dev_priv->flush_flags;
+ flush_sequence = dev_priv->flush_sequence;
+ dev_priv->flush_pending = 0;
+ }
+ }
+ if (flush_completed) {
+ drm_fence_handler(dev, flush_sequence, flush_flags);
+ }
+ if (fm->pending_flush && !dev_priv->flush_pending) {
+ dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
+ dev_priv->flush_flags = fm->pending_flush;
+ dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
+ I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
+ dev_priv->flush_pending = 1;
+ fm->pending_flush = 0;
+ }
+}
+
+void i915_poke_flush(drm_device_t * dev)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ unsigned long flags;
+
+ write_lock_irqsave(&fm->lock, flags);
+ i915_perform_flush(dev);
+ write_unlock_irqrestore(&fm->lock, flags);
+}
+
+int i915_fence_emit_sequence(drm_device_t * dev, uint32_t * sequence)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ i915_emit_irq(dev);
+ *sequence = (uint32_t) dev_priv->counter;
+ return 0;
+}
+
+void i915_fence_handler(drm_device_t * dev)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+
+ write_lock(&fm->lock);
+ i915_perform_flush(dev);
+ i915_perform_flush(dev);
+ write_unlock(&fm->lock);
+}
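
As an editorial aside (not part of the patch), the sequence comparisons in i915_perform_flush() rely on unsigned 32-bit wrap-around arithmetic: a breadcrumb counts as "newer" than a reference value when the unsigned difference is nonzero and smaller than wrap_diff (1 << 30 for this driver). The hypothetical helper below spells out that test.

#include "drmP.h"

/* Hypothetical helper expressing the wrap-safe "is this sequence newer?" test. */
static int example_seq_is_newer(uint32_t sequence, uint32_t reference,
                                uint32_t wrap_diff)
{
        uint32_t diff = sequence - reference;   /* Wraps modulo 2^32. */

        return diff != 0 && diff < wrap_diff;
}

/*
 * For instance, with reference == 0xfffffff0 and sequence == 0x00000010 the
 * unsigned difference is 0x20, so the sequence is correctly treated as newer
 * even though it is numerically smaller.
 */
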
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 87f8da6b..cd2b1907 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -259,7 +259,8 @@ typedef enum drm_map_type {
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
- _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
+ _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
+ _DRM_TTM = 6
} drm_map_type_t;
/**
@@ -629,6 +630,32 @@ typedef struct drm_set_version {
int drm_dd_minor;
} drm_set_version_t;
+#define DRM_FENCE_FLAG_EMIT 0x00000001
+#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
+#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
+#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
+
+#define DRM_FENCE_EXE 0x00000001
+
+typedef struct drm_fence_arg {
+ unsigned handle;
+ int class;
+ unsigned type;
+ unsigned flags;
+ unsigned signaled;
+ enum {
+ drm_fence_create,
+ drm_fence_destroy,
+ drm_fence_reference,
+ drm_fence_unreference,
+ drm_fence_signaled,
+ drm_fence_flush,
+ drm_fence_wait,
+ drm_fence_emit
+ } op;
+} drm_fence_arg_t;
+
+
/**
* \name Ioctls Definitions
*/
@@ -694,6 +721,9 @@ typedef struct drm_set_version {
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t)
+#define DRM_IOCTL_FENCE DRM_IOWR(0x3b, drm_fence_arg_t)
+
+
/*@}*/
/**
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index ba8c56ee..d6bb6c8e 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -434,15 +434,15 @@ static void i915_emit_breadcrumb(drm_device_t *dev)
dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
- if (dev_priv->counter > 0x7FFFFFFFUL)
- dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
-
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
+#ifdef I915_HAVE_FENCE
+ drm_fence_flush_old(dev, dev_priv->counter);
+#endif
}
static int i915_dispatch_cmdbuffer(drm_device_t * dev,
@@ -565,7 +565,9 @@ static int i915_dispatch_flip(drm_device_t * dev)
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
-
+#ifdef I915_HAVE_FENCE
+ drm_fence_flush_old(dev, dev_priv->counter);
+#endif
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;
}
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index a87075b1..403124c9 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -51,6 +51,11 @@
#define DRIVER_MINOR 5
#define DRIVER_PATCHLEVEL 0
+#if defined(__linux__)
+#define I915_HAVE_FENCE
+#define I915_HAVE_BUFFER
+#endif
+
typedef struct _drm_i915_ring_buffer {
int tail_mask;
unsigned long Start;
@@ -81,7 +86,7 @@ typedef struct drm_i915_private {
drm_dma_handle_t *status_page_dmah;
void *hw_status_page;
dma_addr_t dma_status_page;
- unsigned long counter;
+ uint32_t counter;
int back_offset;
int front_offset;
@@ -98,6 +103,14 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
+
+#ifdef I915_HAVE_FENCE
+ uint32_t flush_sequence;
+ uint32_t flush_flags;
+ uint32_t flush_pending;
+ uint32_t saved_flush_status;
+#endif
+
} drm_i915_private_t;
extern drm_ioctl_desc_t i915_ioctls[];
@@ -123,6 +136,7 @@ extern void i915_driver_irq_postinstall(drm_device_t * dev);
extern void i915_driver_irq_uninstall(drm_device_t * dev);
extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
+extern int i915_emit_irq(drm_device_t * dev);
/* i915_mem.c */
extern int i915_mem_alloc(DRM_IOCTL_ARGS);
@@ -132,6 +146,19 @@ extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(drm_device_t * dev,
DRMFILE filp, struct mem_block *heap);
+#ifdef I915_HAVE_FENCE
+/* i915_fence.c */
+extern void i915_fence_handler(drm_device_t *dev);
+extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t *sequence);
+extern void i915_poke_flush(drm_device_t *dev);
+extern void i915_sync_flush(drm_device_t *dev);
+#endif
+
+#ifdef I915_HAVE_BUFFER
+/* i915_buffer.c */
+extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev,
+ int cached);
+#endif
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
@@ -191,6 +218,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define I915REG_INT_IDENTITY_R 0x020a4
#define I915REG_INT_MASK_R 0x020a8
#define I915REG_INT_ENABLE_R 0x020a0
+#define I915REG_INSTPM 0x020c0
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
@@ -272,6 +300,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-#define READ_BREADCRUMB(dev_priv) (((u32*)(dev_priv->hw_status_page))[5])
-
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
#endif
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 14213b58..08d3140b 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -56,8 +56,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- if (temp & USER_INT_FLAG)
+ if (temp & USER_INT_FLAG) {
DRM_WAKEUP(&dev_priv->irq_queue);
+#ifdef I915_HAVE_FENCE
+ i915_fence_handler(dev);
+#endif
+ }
if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
atomic_inc(&dev->vbl_received);
@@ -68,7 +72,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_HANDLED;
}
-static int i915_emit_irq(drm_device_t * dev)
+int i915_emit_irq(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -260,7 +264,7 @@ void i915_driver_irq_preinstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- I915_WRITE16(I915REG_HWSTAM, 0xfffe);
+ I915_WRITE16(I915REG_HWSTAM, 0xeffe);
I915_WRITE16(I915REG_INT_MASK_R, 0x0);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}