path: root/linux-core
Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/Makefile.kernel   |    6
-rw-r--r--  linux-core/drmP.h            |  297
-rw-r--r--  linux-core/drm_agpsupport.c  |   13
-rw-r--r--  linux-core/drm_bo.c          | 1542
-rw-r--r--  linux-core/drm_bo_move.c     |  411
-rw-r--r--  linux-core/drm_compat.c      |  265
-rw-r--r--  linux-core/drm_compat.h      |   65
-rw-r--r--  linux-core/drm_fence.c       |  265
-rw-r--r--  linux-core/drm_fops.c        |   57
-rw-r--r--  linux-core/drm_irq.c         |    4
-rw-r--r--  linux-core/drm_lock.c        |  159
-rw-r--r--  linux-core/drm_mm.c          |    1
-rw-r--r--  linux-core/drm_object.c      |   14
-rw-r--r--  linux-core/drm_objects.h     |  470
-rw-r--r--  linux-core/drm_stub.c        |    2
-rw-r--r--  linux-core/drm_ttm.c         |  296
-rw-r--r--  linux-core/drm_ttm.h         |  146
-rw-r--r--  linux-core/drm_vm.c          |  426
-rw-r--r--  linux-core/i810_dma.c        |    4
-rw-r--r--  linux-core/i830_dma.c        |    2
-rw-r--r--  linux-core/i915_buffer.c     |  175
-rw-r--r--  linux-core/i915_drv.c        |   18
-rw-r--r--  linux-core/i915_fence.c      |   47
-rw-r--r--  linux-core/sis_drv.c         |    2
-rw-r--r--  linux-core/via_buffer.c      |  163
-rw-r--r--  linux-core/via_fence.c       |  230
26 files changed, 3249 insertions(+), 1831 deletions(-)
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 05d6e149..08c0fb2a 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -13,7 +13,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
- drm_fence.o drm_ttm.o drm_bo.o
+ drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
@@ -29,8 +29,8 @@ sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
- via_video.o via_dmablit.o
-mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
+ via_video.o via_dmablit.o via_fence.o via_buffer.o
+mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
nv-objs := nv_drv.o
ifeq ($(CONFIG_COMPAT),y)
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 9c748e6e..9b5f5bdd 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -458,6 +458,10 @@ typedef struct drm_lock_data {
struct file *filp; /**< File descr of lock holder (0=kernel) */
wait_queue_head_t lock_queue; /**< Queue of blocked processes */
unsigned long lock_time; /**< Time of last lock in jiffies */
+ spinlock_t spinlock;
+ uint32_t kernel_waiters;
+ uint32_t user_waiters;
+ int idle_has_lock;
} drm_lock_data_t;
/**
@@ -591,78 +595,8 @@ typedef struct ati_pcigart_info {
drm_local_map_t mapping;
} drm_ati_pcigart_info;
-/*
- * User space objects and their references.
- */
-
-#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-typedef enum {
- drm_fence_type,
- drm_buffer_type,
- drm_ttm_type
-
- /*
- * Add other user space object types here.
- */
-
-} drm_object_type_t;
-
-
-
-
-/*
- * A user object is a structure that helps the drm give out user handles
- * to kernel internal objects and to keep track of these objects so that
- * they can be destroyed, for example when the user space process exits.
- * Designed to be accessible using a user space 32-bit handle.
- */
-
-typedef struct drm_user_object{
- drm_hash_item_t hash;
- struct list_head list;
- drm_object_type_t type;
- atomic_t refcount;
- int shareable;
- drm_file_t *owner;
- void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj,
- drm_ref_t ref_action);
- void (*unref)(drm_file_t *priv, struct drm_user_object *obj,
- drm_ref_t unref_action);
- void (*remove)(drm_file_t *priv, struct drm_user_object *obj);
-} drm_user_object_t;
-
-/*
- * A ref object is a structure which is used to
- * keep track of references to user objects and to keep track of these
- * references so that they can be destroyed for example when the user space
- * process exits. Designed to be accessible using a pointer to the _user_ object.
- */
-
-
-typedef struct drm_ref_object {
- drm_hash_item_t hash;
- struct list_head list;
- atomic_t refcount;
- drm_ref_t unref_action;
-} drm_ref_object_t;
-
-
-#include "drm_ttm.h"
-
-/*
- * buffer object driver
- */
-
-typedef struct drm_bo_driver{
- int cached[DRM_BO_MEM_TYPES];
- drm_local_map_t *iomap[DRM_BO_MEM_TYPES];
- drm_ttm_backend_t *(*create_ttm_backend_entry)
- (struct drm_device *dev);
- int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
- int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
-} drm_bo_driver_t;
+#include "drm_objects.h"
/**
* DRM driver structure. This structure represent the common code for
@@ -712,6 +646,8 @@ struct drm_driver {
void (*reclaim_buffers) (struct drm_device *dev, struct file * filp);
void (*reclaim_buffers_locked) (struct drm_device *dev,
struct file * filp);
+ void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
+ struct file * filp);
unsigned long (*get_map_ofs) (drm_map_t * map);
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
@@ -749,63 +685,6 @@ typedef struct drm_head {
} drm_head_t;
-typedef struct drm_fence_driver{
- int no_types;
- uint32_t wrap_diff;
- uint32_t flush_diff;
- uint32_t sequence_mask;
- int lazy_capable;
- int (*emit) (struct drm_device *dev, uint32_t flags,
- uint32_t *breadcrumb,
- uint32_t *native_type);
- void (*poke_flush) (struct drm_device *dev);
-} drm_fence_driver_t;
-
-#define _DRM_FENCE_TYPE_EXE 0x00
-
-typedef struct drm_fence_manager{
- int initialized;
- rwlock_t lock;
-
- /*
- * The list below should be maintained in sequence order and
- * access is protected by the above spinlock.
- */
-
- struct list_head ring;
- struct list_head *fence_types[32];
- volatile uint32_t pending_flush;
- wait_queue_head_t fence_queue;
- int pending_exe_flush;
- uint32_t last_exe_flush;
- uint32_t exe_flush_sequence;
- atomic_t count;
-} drm_fence_manager_t;
-
-typedef struct drm_buffer_manager{
- struct mutex init_mutex;
- int nice_mode;
- int initialized;
- drm_file_t *last_to_validate;
- int has_type[DRM_BO_MEM_TYPES];
- int use_type[DRM_BO_MEM_TYPES];
- drm_mm_t manager[DRM_BO_MEM_TYPES];
- struct list_head lru[DRM_BO_MEM_TYPES];
- struct list_head pinned[DRM_BO_MEM_TYPES];
- struct list_head unfenced;
- struct list_head ddestroy;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
- struct work_struct wq;
-#else
- struct delayed_work wq;
-#endif
- uint32_t fence_type;
- unsigned long cur_pages;
- atomic_t count;
-} drm_buffer_manager_t;
-
-
-
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
@@ -960,62 +839,6 @@ typedef struct drm_agp_ttm_priv {
} drm_agp_ttm_priv;
#endif
-typedef struct drm_fence_object{
- drm_user_object_t base;
- atomic_t usage;
-
- /*
- * The below three fields are protected by the fence manager spinlock.
- */
-
- struct list_head ring;
- int class;
- uint32_t native_type;
- uint32_t type;
- uint32_t signaled;
- uint32_t sequence;
- uint32_t flush_mask;
- uint32_t submitted_flush;
-} drm_fence_object_t;
-
-
-typedef struct drm_buffer_object{
- drm_device_t *dev;
- drm_user_object_t base;
-
- /*
- * If there is a possibility that the usage variable is zero,
- * then dev->struct_mutext should be locked before incrementing it.
- */
-
- atomic_t usage;
- drm_ttm_object_t *ttm_object;
- drm_ttm_t *ttm;
- unsigned long num_pages;
- unsigned long buffer_start;
- drm_bo_type_t type;
- unsigned long offset;
- uint32_t page_alignment;
- atomic_t mapped;
- uint32_t flags;
- uint32_t mask;
- uint32_t mem_type;
-
- drm_mm_node_t *mm_node; /* MM node for on-card RAM */
- struct list_head lru;
- struct list_head ddestroy;
-
- uint32_t fence_type;
- uint32_t fence_class;
- drm_fence_object_t *fence;
- uint32_t priv_flags;
- wait_queue_head_t event_queue;
- struct mutex mutex;
-} drm_buffer_object_t;
-
-#define _DRM_BO_FLAG_UNFENCED 0x00000001
-#define _DRM_BO_FLAG_EVICTED 0x00000002
-
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
@@ -1193,12 +1016,14 @@ extern int drm_lock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_unlock(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
-extern int drm_lock_free(drm_device_t * dev,
- __volatile__ unsigned int *lock, unsigned int context);
+extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
+extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
+extern void drm_idlelock_take(drm_lock_data_t *lock_data);
+extern void drm_idlelock_release(drm_lock_data_t *lock_data);
+
/*
* These are exported to drivers so that they can implement fencing using
- * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
*/
extern int drm_i_have_hw_lock(struct file *filp);
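The idlelock pair exported above is new here: it lets in-kernel code stall new
lock takers while the hardware is drained, without a file context owning the
lock, and is what the reclaim_buffers_idlelocked driver hook added to drmP.h
is meant to be called under. A minimal sketch of a driver-side user, assuming
hypothetical my_driver_quiesce_dma() and my_driver_release_buffers() helpers:

static void my_driver_reclaim_buffers_idlelocked(struct drm_device *dev,
                                                 struct file *filp)
{
        /* Block new userspace lock takers without owning the lock. */
        drm_idlelock_take(&dev->lock);

        my_driver_quiesce_dma(dev);           /* hypothetical: wait for DMA */
        my_driver_release_buffers(dev, filp); /* hypothetical: free buffers */

        drm_idlelock_release(&dev->lock);
}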
@@ -1357,106 +1182,10 @@ static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
}
-/*
- * User space object bookkeeping (drm_object.c)
- */
-
-/*
- * Must be called with the struct_mutex held.
- */
-
-extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,
-
-/*
- * Must be called with the struct_mutex held.
- */
- int shareable);
-extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key);
-
-/*
- * Must be called with the struct_mutex held.
- * If "item" has been obtained by a call to drm_lookup_user_object. You may not
- * release the struct_mutex before calling drm_remove_ref_object.
- * This function may temporarily release the struct_mutex.
- */
-
-extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item);
-
-/*
- * Must be called with the struct_mutex held. May temporarily release it.
- */
-
-extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object,
- drm_ref_t ref_action);
-
-/*
- * Must be called with the struct_mutex held.
- */
-
-drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv,
- drm_user_object_t *referenced_object,
- drm_ref_t ref_action);
-/*
- * Must be called with the struct_mutex held.
- * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
- * release the struct_mutex before calling drm_remove_ref_object.
- * This function may temporarily release the struct_mutex.
- */
-
-extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item);
-extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type,
- drm_user_object_t **object);
-extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
-
-
-
-/*
- * fence objects (drm_fence.c)
- */
-
-extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
-extern void drm_fence_manager_init(drm_device_t *dev);
-extern void drm_fence_manager_takedown(drm_device_t *dev);
-extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
-extern int drm_fence_object_flush(drm_device_t * dev,
- volatile drm_fence_object_t * fence,
- uint32_t type);
-extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
- uint32_t type);
-extern void drm_fence_usage_deref_locked(drm_device_t * dev,
- drm_fence_object_t * fence);
-extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
- drm_fence_object_t * fence);
-extern int drm_fence_object_wait(drm_device_t * dev,
- volatile drm_fence_object_t * fence,
- int lazy, int ignore_signals, uint32_t mask);
-extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
- uint32_t fence_flags,
- drm_fence_object_t **c_fence);
-extern int drm_fence_add_user_object(drm_file_t *priv,
- drm_fence_object_t *fence,
- int shareable);
-
-extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
-
-/*
- * buffer objects (drm_bo.c)
- */
-
-extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
-extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
-extern int drm_bo_driver_finish(drm_device_t *dev);
-extern int drm_bo_driver_init(drm_device_t *dev);
-extern int drm_fence_buffer_objects(drm_file_t * priv,
- struct list_head *list,
- uint32_t fence_flags,
- drm_fence_object_t *fence,
- drm_fence_object_t **used_fence);
-
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 9cdbdaf0..6b93d249 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -606,8 +606,8 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
int ret;
DRM_DEBUG("drm_agp_bind_ttm\n");
- DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED,
- (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0);
+ DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
+ DRM_BE_FLAG_BOUND_CACHED);
mem->is_flushed = TRUE;
mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
ret = drm_agp_bind_memory(mem, offset);
@@ -620,7 +620,7 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
-
+
DRM_DEBUG("drm_agp_unbind_ttm\n");
if (agp_priv->mem->is_bound)
return drm_agp_unbind_memory(agp_priv->mem);
@@ -710,7 +710,6 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
agp_priv->uncached_type = AGP_USER_MEMORY;
agp_priv->bridge = dev->agp->bridge;
agp_priv->populated = FALSE;
- agp_be->aperture_base = dev->agp->agp_info.aper_base;
agp_be->private = (void *) agp_priv;
agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
agp_be->populate = drm_agp_populate;
@@ -718,10 +717,8 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
agp_be->bind = drm_agp_bind_ttm;
agp_be->unbind = drm_agp_unbind_ttm;
agp_be->destroy = drm_agp_destroy_ttm;
- DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE,
- (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0);
- DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA,
- (dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0);
+ DRM_FLAG_MASKED(agp_be->flags, (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0,
+ DRM_BE_FLAG_NEEDS_FREE);
agp_be->drm_map_type = _DRM_AGP;
return agp_be;
}
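All of the DRM_MASK_VAL() to DRM_FLAG_MASKED() conversions above rely on the
same XOR identity: DRM_FLAG_MASKED(old, new, mask) copies the bits selected by
mask from new into old and leaves every other bit untouched, because
old ^ ((old ^ new) & mask) equals new inside the mask and old outside it. A
standalone userspace demonstration, with the macro body taken from the
definition removed from drm_bo.c below:

#include <assert.h>
#include <stdint.h>

#define DRM_FLAG_MASKED(_old, _new, _mask) \
        ((_old) ^= (((_old) ^ (_new)) & (_mask)))

int main(void)
{
        uint32_t flags = 0x000000f0;

        /* Set bit 0x1; bits outside the mask are untouched. */
        DRM_FLAG_MASKED(flags, 0x00000001, 0x00000001);
        assert(flags == 0x000000f1);

        /* Clear it again: "new" contributes 0 inside the mask. */
        DRM_FLAG_MASKED(flags, 0x00000000, 0x00000001);
        assert(flags == 0x000000f0);
        return 0;
}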
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 2b960c75..548ce14f 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -10,6 +10,10 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -18,11 +22,6 @@
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
*
**************************************************************************/
/*
@@ -32,34 +31,28 @@
#include "drmP.h"
/*
- * Buffer object locking policy:
- * Lock dev->struct_mutex;
- * Increase usage
- * Unlock dev->struct_mutex;
- * Lock buffer->mutex;
- * Do whatever you want;
- * Unlock buffer->mutex;
- * Decrease usage. Call destruction if zero.
+ * Locking may look a bit complicated but isn't really:
+ *
+ * The buffer usage atomic_t needs to be protected by dev->struct_mutex
+ * when there is a chance that it can be zero before or after the operation.
+ *
+ * dev->struct_mutex also protects all lists and list heads, as well as
+ * hash tables and hash heads.
*
- * User object visibility ups usage just once, since it has its own
- * refcounting.
+ * bo->mutex protects the buffer object itself excluding the usage field.
+ * bo->mutex also protects the buffer list heads, so to manipulate those, we need
+ * both the bo->mutex and the dev->struct_mutex.
*
- * Destruction:
- * lock dev->struct_mutex;
- * Verify that usage is zero. Otherwise unlock and continue.
- * Destroy object.
- * unlock dev->struct_mutex;
+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
+ * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
+ * traversal will, in general, need to be restarted.
*
- * Mutex and spinlock locking orders:
- * 1.) Buffer mutex
- * 2.) Refer to ttm locking orders.
*/
-static void drm_bo_destroy_locked(drm_buffer_object_t *bo);
-
-#define DRM_FLAG_MASKED(_old, _new, _mask) {\
-(_old) ^= (((_old) ^ (_new)) & (_mask)); \
-}
+static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
+static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
+static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
+static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
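The comment above fixes the locking order as bo->mutex before
dev->struct_mutex, which forces the restart-prone traversal idiom that
drm_bo_mem_force_space() and drm_bo_force_list_clean() below both follow. A
hedged sketch of the shape, not code from this patch:

static void walk_lru_sketch(drm_device_t * dev, drm_mem_type_manager_t * man)
{
        drm_buffer_object_t *entry;

        mutex_lock(&dev->struct_mutex);
        while (!list_empty(&man->lru)) {
                entry = list_entry(man->lru.next, drm_buffer_object_t, lru);
                atomic_inc(&entry->usage);        /* keep the entry alive */
                mutex_unlock(&dev->struct_mutex); /* honor the lock order */

                mutex_lock(&entry->mutex);
                /* ... operate on the buffer; the operation is expected to
                 * unlink it from this lru, as eviction does ... */
                mutex_unlock(&entry->mutex);

                /* The list may have changed; restart from the head. */
                mutex_lock(&dev->struct_mutex);
                drm_bo_usage_deref_locked(entry);
        }
        mutex_unlock(&dev->struct_mutex);
}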
static inline uint32_t drm_bo_type_flags(unsigned type)
{
@@ -70,76 +63,189 @@ static inline uint32_t drm_bo_type_flags(unsigned type)
* bo locked. dev->struct_mutex locked.
*/
-static void drm_bo_add_to_lru(drm_buffer_object_t * bo,
- drm_buffer_manager_t * bm)
+void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
{
- struct list_head *list;
- bo->mem_type = 0;
+ drm_mem_type_manager_t *man;
- switch(bo->flags & DRM_BO_MASK_MEM) {
- case DRM_BO_FLAG_MEM_TT:
- bo->mem_type = DRM_BO_MEM_TT;
- break;
- case DRM_BO_FLAG_MEM_VRAM:
- bo->mem_type = DRM_BO_MEM_VRAM;
- break;
- case DRM_BO_FLAG_MEM_LOCAL:
- bo->mem_type = DRM_BO_MEM_LOCAL;
- break;
- default:
- BUG_ON(1);
+ man = &bo->dev->bm.man[bo->pinned_mem_type];
+ list_add_tail(&bo->pinned_lru, &man->pinned);
+}
+
+void drm_bo_add_to_lru(drm_buffer_object_t * bo)
+{
+ drm_mem_type_manager_t *man;
+
+ if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
+ man = &bo->dev->bm.man[bo->mem.mem_type];
+ list_add_tail(&bo->lru, &man->lru);
+ } else {
+ INIT_LIST_HEAD(&bo->lru);
}
- list = (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
- &bm->pinned[bo->mem_type] : &bm->lru[bo->mem_type];
- list_add_tail(&bo->lru, list);
- return;
+}
+
+static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
+{
+#ifdef DRM_ODD_MM_COMPAT
+ int ret;
+
+ ret = drm_bo_lock_kmm(bo);
+ if (ret)
+ return ret;
+ drm_bo_unmap_virtual(bo);
+ if (old_is_pci)
+ drm_bo_finish_unmap(bo);
+#else
+ drm_bo_unmap_virtual(bo);
+#endif
+ return 0;
+}
+
+static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
+{
+#ifdef DRM_ODD_MM_COMPAT
+ int ret;
+
+ ret = drm_bo_remap_bound(bo);
+ if (ret) {
+ DRM_ERROR("Failed to remap a bound buffer object.\n"
+ "\tThis might cause a sigbus later.\n");
+ }
+ drm_bo_unlock_kmm(bo);
+#endif
}
/*
- * bo locked.
+ * Call bo->mutex locked.
*/
-static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict,
- int force_no_move)
+static int drm_bo_add_ttm(drm_buffer_object_t * bo)
{
drm_device_t *dev = bo->dev;
- int ret;
+ int ret = 0;
+ bo->ttm = NULL;
- if (bo->mm_node) {
- mutex_lock(&dev->struct_mutex);
- if (evict)
- ret = drm_evict_ttm(bo->ttm);
- else
- ret = drm_unbind_ttm(bo->ttm);
+ switch (bo->type) {
+ case drm_bo_type_dc:
+ bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
+ if (!bo->ttm)
+ ret = -ENOMEM;
+ break;
+ case drm_bo_type_user:
+ case drm_bo_type_fake:
+ break;
+ default:
+ DRM_ERROR("Illegal buffer object type\n");
+ ret = -EINVAL;
+ break;
+ }
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN)
- schedule();
- return ret;
- }
+ return ret;
+}
+
+static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
+ drm_bo_mem_reg_t * mem,
+ int evict, int no_wait)
+{
+ drm_device_t *dev = bo->dev;
+ drm_buffer_manager_t *bm = &dev->bm;
+ int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
+ int new_is_pci = drm_mem_reg_is_pci(dev, mem);
+ drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
+ drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
+ int ret = 0;
- if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
- drm_mm_put_block(bo->mm_node);
- bo->mm_node = NULL;
+ if (old_is_pci || new_is_pci)
+ ret = drm_bo_vm_pre_move(bo, old_is_pci);
+ if (ret)
+ return ret;
+
+ /*
+ * Create and bind a ttm if required.
+ */
+
+ if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
+ ret = drm_bo_add_ttm(bo);
+ if (ret)
+ goto out_err;
+
+ if (mem->mem_type != DRM_BO_MEM_LOCAL) {
+ ret = drm_bind_ttm(bo->ttm, new_man->flags &
+ DRM_BO_FLAG_CACHED,
+ mem->mm_node->start);
+ if (ret)
+ goto out_err;
}
- mutex_unlock(&dev->struct_mutex);
}
- bo->flags &= ~DRM_BO_FLAG_MEM_TT;
- bo->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+ if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
+
+ drm_bo_mem_reg_t *old_mem = &bo->mem;
+ uint32_t save_flags = old_mem->flags;
+ uint32_t save_mask = old_mem->mask;
+
+ *old_mem = *mem;
+ mem->mm_node = NULL;
+ old_mem->mask = save_mask;
+ DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
+
+ } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
+ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
+
+ ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
+
+ } else if (dev->driver->bo_driver->move) {
+ ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
+
+ } else {
+
+ ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
+
+ }
+
+ if (ret)
+ goto out_err;
+
+ if (old_is_pci || new_is_pci)
+ drm_bo_vm_post_move(bo);
+
+ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
+ ret =
+ dev->driver->bo_driver->invalidate_caches(dev,
+ bo->mem.flags);
+ if (ret)
+ DRM_ERROR("Can not flush read caches\n");
+ }
+
+ DRM_FLAG_MASKED(bo->priv_flags,
+ (evict) ? _DRM_BO_FLAG_EVICTED : 0,
+ _DRM_BO_FLAG_EVICTED);
+
+ if (bo->mem.mm_node)
+ bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
return 0;
-}
+ out_err:
+ if (old_is_pci || new_is_pci)
+ drm_bo_vm_post_move(bo);
+
+ new_man = &bm->man[bo->mem.mem_type];
+ if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
+ drm_ttm_unbind(bo->ttm);
+ drm_destroy_ttm(bo->ttm);
+ bo->ttm = NULL;
+ }
+
+ return ret;
+}
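drm_bo_handle_move_mem() above picks one of four paths: a plain structure copy
when the buffer is in local memory with no TTM backing, drm_bo_move_ttm() when
neither end is fixed memory, the driver's move hook when one is provided, and
drm_bo_move_memcpy() as the last resort. A hedged sketch of such a driver
hook, where my_driver_copy_blit() stands in for a hypothetical accelerated
copy and the fallbacks are the real helpers from drm_bo_move.c:

static int my_driver_bo_move(drm_buffer_object_t * bo,
                             int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_bo_mem_reg_t *old_mem = &bo->mem;

        /* Moves out of system memory have nothing to accelerate. */
        if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
                return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);

        /* Hypothetical blitter copy; fall back to the CPU on failure. */
        if (my_driver_copy_blit(bo, evict, no_wait, new_mem))
                return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);

        return 0;
}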
/*
* Call bo->mutex locked.
* Wait until the buffer is idle.
*/
-static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
- int no_wait)
+int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
+ int no_wait)
{
drm_fence_object_t *fence = bo->fence;
@@ -168,14 +274,44 @@ static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
return 0;
}
+static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
+{
+ drm_device_t *dev = bo->dev;
+ drm_buffer_manager_t *bm = &dev->bm;
+
+ if (bo->fence) {
+ if (bm->nice_mode) {
+ unsigned long _end = jiffies + 3 * DRM_HZ;
+ int ret;
+ do {
+ ret = drm_bo_wait(bo, 0, 1, 0);
+ if (ret && allow_errors)
+ return ret;
+
+ } while (ret && !time_after_eq(jiffies, _end));
+
+ if (bo->fence) {
+ bm->nice_mode = 0;
+ DRM_ERROR("Detected GPU lockup or "
+ "fence driver was taken down. "
+ "Evicting buffer.\n");
+ }
+ }
+ if (bo->fence) {
+ drm_fence_usage_deref_unlocked(dev, bo->fence);
+ bo->fence = NULL;
+ }
+ }
+ return 0;
+}
+
/*
* Call dev->struct_mutex locked.
* Attempts to remove all private references to a buffer by expiring its
* fence object and removing from lru lists and memory managers.
*/
-
-static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
+static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
{
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
@@ -186,32 +322,14 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
- if (bo->fence && drm_fence_object_signaled(bo->fence,
- bo->fence_type)) {
+ if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
drm_fence_usage_deref_locked(dev, bo->fence);
bo->fence = NULL;
}
- if (bo->fence && remove_all) {
- if (bm->nice_mode) {
- unsigned long _end = jiffies + 3 * DRM_HZ;
- int ret;
- do {
- ret = drm_bo_wait(bo, 0, 1, 0);
- } while (ret && !time_after_eq(jiffies, _end));
+ if (bo->fence && remove_all)
+ (void)drm_bo_expire_fence(bo, 0);
- if (bo->fence) {
- bm->nice_mode = 0;
- DRM_ERROR("Detected GPU lockup or "
- "fence driver was taken down. "
- "Evicting waiting buffers.\n");
- }
- if (bo->fence) {
- drm_fence_usage_deref_unlocked(dev, bo->fence);
- bo->fence = NULL;
- }
- }
- }
mutex_lock(&dev->struct_mutex);
if (!atomic_dec_and_test(&bo->usage)) {
@@ -220,9 +338,16 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
if (!bo->fence) {
list_del_init(&bo->lru);
- if (bo->mm_node) {
- drm_mm_put_block(bo->mm_node);
- bo->mm_node = NULL;
+ if (bo->mem.mm_node) {
+ drm_mm_put_block(bo->mem.mm_node);
+ if (bo->pinned_node == bo->mem.mm_node)
+ bo->pinned_node = NULL;
+ bo->mem.mm_node = NULL;
+ }
+ list_del_init(&bo->pinned_lru);
+ if (bo->pinned_node) {
+ drm_mm_put_block(bo->pinned_node);
+ bo->pinned_node = NULL;
}
list_del_init(&bo->ddestroy);
mutex_unlock(&bo->mutex);
@@ -234,50 +359,42 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
drm_fence_object_flush(dev, bo->fence, bo->fence_type);
list_add_tail(&bo->ddestroy, &bm->ddestroy);
schedule_delayed_work(&bm->wq,
- ((DRM_HZ / 100) <
- 1) ? 1 : DRM_HZ / 100);
+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
}
-out:
+ out:
mutex_unlock(&bo->mutex);
return;
}
-
/*
* Verify that refcount is 0 and that there are no internal references
* to the buffer object. Then destroy it.
*/
-static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
+static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
{
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
- if (list_empty(&bo->lru) && bo->mm_node == NULL && atomic_read(&bo->usage) == 0) {
- BUG_ON(bo->fence != NULL);
-
- if (bo->ttm) {
- unsigned long _end = jiffies + DRM_HZ;
- int ret;
-
- do {
- ret = drm_unbind_ttm(bo->ttm);
- if (ret == -EAGAIN) {
- mutex_unlock(&dev->struct_mutex);
- schedule();
- mutex_lock(&dev->struct_mutex);
- }
- } while (ret == -EAGAIN && !time_after_eq(jiffies, _end));
-
- if (ret) {
- DRM_ERROR("Couldn't unbind TTM region while destroying a buffer. "
- "Bad. Continuing anyway\n");
- }
+ if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
+ list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
+ list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
+ if (bo->fence != NULL) {
+ DRM_ERROR("Fence was non-zero.\n");
+ drm_bo_cleanup_refs(bo, 0);
+ return;
}
- if (bo->ttm_object) {
- drm_ttm_object_deref_locked(dev, bo->ttm_object);
+#ifdef DRM_ODD_MM_COMPAT
+ BUG_ON(!list_empty(&bo->vma_list));
+ BUG_ON(!list_empty(&bo->p_mm_list));
+#endif
+
+ if (bo->ttm) {
+ drm_ttm_unbind(bo->ttm);
+ drm_destroy_ttm(bo->ttm);
+ bo->ttm = NULL;
}
atomic_dec(&bm->count);
@@ -297,7 +414,6 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
return;
}
-
/*
* Call dev->struct_mutex locked.
*/
@@ -325,7 +441,6 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
atomic_dec(&nentry->usage);
}
}
-
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
@@ -338,11 +453,11 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
drm_device_t *dev = (drm_device_t *) data;
drm_buffer_manager_t *bm = &dev->bm;
#else
- drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
+ drm_buffer_manager_t *bm =
+ container_of(work, drm_buffer_manager_t, wq.work);
drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif
-
DRM_DEBUG("Delayed delete Worker\n");
mutex_lock(&dev->struct_mutex);
@@ -358,7 +473,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
+void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
{
if (atomic_dec_and_test(&bo->usage)) {
drm_bo_destroy_locked(bo);
@@ -367,8 +482,11 @@ static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
- drm_bo_usage_deref_locked(drm_user_object_entry(uo, drm_buffer_object_t,
- base));
+ drm_buffer_object_t *bo =
+ drm_user_object_entry(uo, drm_buffer_object_t, base);
+
+ drm_bo_takedown_vm_locked(bo);
+ drm_bo_usage_deref_locked(bo);
}
static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
@@ -443,7 +561,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
}
} else {
mutex_unlock(&dev->struct_mutex);
- ret = drm_fence_object_create(dev, fence_type,
+ ret = drm_fence_object_create(dev, 0, fence_type,
fence_flags | DRM_FENCE_FLAG_EMIT,
&fence);
mutex_lock(&dev->struct_mutex);
@@ -454,6 +572,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
count = 0;
l = f_list.next;
while (l != &f_list) {
+ prefetch(l->next);
entry = list_entry(l, drm_buffer_object_t, lru);
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
@@ -468,7 +587,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
DRM_FLAG_MASKED(entry->priv_flags, 0,
_DRM_BO_FLAG_UNFENCED);
DRM_WAKEUP(&entry->event_queue);
- drm_bo_add_to_lru(entry, bm);
+ drm_bo_add_to_lru(entry);
}
mutex_unlock(&entry->mutex);
drm_bo_usage_deref_locked(entry);
@@ -489,11 +608,11 @@ EXPORT_SYMBOL(drm_fence_buffer_objects);
*/
static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
- int no_wait, int force_no_move)
+ int no_wait)
{
int ret = 0;
drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
+ drm_bo_mem_reg_t evict_mem;
/*
* Someone might have modified the buffer before we took the buffer mutex.
@@ -501,70 +620,93 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
goto out;
- if (!(bo->flags & drm_bo_type_flags(mem_type)))
+ if (bo->mem.mem_type != mem_type)
goto out;
ret = drm_bo_wait(bo, 0, 0, no_wait);
+ if (ret && ret != -EAGAIN) {
+ DRM_ERROR("Failed to expire fence before "
+ "buffer eviction.\n");
+ goto out;
+ }
+
+ evict_mem = bo->mem;
+ evict_mem.mm_node = NULL;
+
+ if (bo->type == drm_bo_type_fake) {
+ bo->mem.mem_type = DRM_BO_MEM_LOCAL;
+ bo->mem.mm_node = NULL;
+ goto out1;
+ }
+
+ evict_mem = bo->mem;
+ evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
+ ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
+
if (ret) {
if (ret != -EAGAIN)
- DRM_ERROR("Failed to expire fence before "
- "buffer eviction.\n");
+ DRM_ERROR("Failed to find memory space for "
+ "buffer 0x%p eviction.\n", bo);
goto out;
}
- if (mem_type == DRM_BO_MEM_TT) {
- ret = drm_move_tt_to_local(bo, 1, force_no_move);
- if (ret)
- goto out;
- mutex_lock(&dev->struct_mutex);
- list_del_init(&bo->lru);
- drm_bo_add_to_lru(bo, bm);
- mutex_unlock(&dev->struct_mutex);
- }
+ ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
- if (ret)
+ if (ret) {
+ if (ret != -EAGAIN)
+ DRM_ERROR("Buffer eviction failed\n");
goto out;
+ }
+
+ out1:
+ mutex_lock(&dev->struct_mutex);
+ if (evict_mem.mm_node) {
+ if (evict_mem.mm_node != bo->pinned_node)
+ drm_mm_put_block(evict_mem.mm_node);
+ evict_mem.mm_node = NULL;
+ }
+ list_del(&bo->lru);
+ drm_bo_add_to_lru(bo);
+ mutex_unlock(&dev->struct_mutex);
DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
_DRM_BO_FLAG_EVICTED);
+
out:
return ret;
}
-/*
- * bo->mutex locked.
- */
-
-int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type,
- int no_wait)
+static int drm_bo_mem_force_space(drm_device_t * dev,
+ drm_bo_mem_reg_t * mem,
+ uint32_t mem_type, int no_wait)
{
- drm_device_t *dev = bo->dev;
drm_mm_node_t *node;
drm_buffer_manager_t *bm = &dev->bm;
drm_buffer_object_t *entry;
- drm_mm_t *mm = &bm->manager[mem_type];
+ drm_mem_type_manager_t *man = &bm->man[mem_type];
struct list_head *lru;
- unsigned long size = bo->num_pages;
+ unsigned long num_pages = mem->num_pages;
int ret;
mutex_lock(&dev->struct_mutex);
do {
- node = drm_mm_search_free(mm, size, bo->page_alignment, 1);
+ node = drm_mm_search_free(&man->manager, num_pages,
+ mem->page_alignment, 1);
if (node)
break;
- lru = &bm->lru[mem_type];
+ lru = &man->lru;
if (lru->next == lru)
break;
entry = list_entry(lru->next, drm_buffer_object_t, lru);
-
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
- BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
- ret = drm_bo_evict(entry, mem_type, no_wait, 0);
+ BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
+
+ ret = drm_bo_evict(entry, mem_type, no_wait);
mutex_unlock(&entry->mutex);
drm_bo_usage_deref_unlocked(entry);
if (ret)
@@ -573,146 +715,163 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type,
} while (1);
if (!node) {
- DRM_ERROR("Out of videoram / aperture space\n");
mutex_unlock(&dev->struct_mutex);
return -ENOMEM;
}
- node = drm_mm_get_block(node, size, bo->page_alignment);
+ node = drm_mm_get_block(node, num_pages, mem->page_alignment);
mutex_unlock(&dev->struct_mutex);
- BUG_ON(!node);
- node->private = (void *)bo;
-
- bo->mm_node = node;
- bo->offset = node->start * PAGE_SIZE;
+ mem->mm_node = node;
+ mem->mem_type = mem_type;
return 0;
}
-static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
+static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
+ uint32_t mem_type,
+ uint32_t mask, uint32_t * res_mask)
{
- drm_device_t *dev = bo->dev;
- drm_ttm_backend_t *be;
- int ret;
+ uint32_t cur_flags = drm_bo_type_flags(mem_type);
+ uint32_t flag_diff;
- if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
- BUG_ON(bo->mm_node);
- ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
- if (ret)
- return ret;
- }
+ if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
+ cur_flags |= DRM_BO_FLAG_CACHED;
+ if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
+ cur_flags |= DRM_BO_FLAG_MAPPABLE;
+ if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
+ DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
- DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start);
-
- mutex_lock(&dev->struct_mutex);
- ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
- bo->mm_node->start);
- if (ret) {
- drm_mm_put_block(bo->mm_node);
- bo->mm_node = NULL;
- }
- mutex_unlock(&dev->struct_mutex);
+ if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
+ return 0;
- if (ret) {
- return ret;
+ if (mem_type == DRM_BO_MEM_LOCAL) {
+ *res_mask = cur_flags;
+ return 1;
}
- be = bo->ttm->be;
- if (be->needs_ub_cache_adjust(be))
- bo->flags &= ~DRM_BO_FLAG_CACHED;
- bo->flags &= ~DRM_BO_MASK_MEM;
- bo->flags |= DRM_BO_FLAG_MEM_TT;
+ flag_diff = (mask ^ cur_flags);
+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
+ (!(mask & DRM_BO_FLAG_CACHED) ||
+ (mask & DRM_BO_FLAG_FORCE_CACHING)))
+ return 0;
- if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
- ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
- if (ret)
- DRM_ERROR("Could not flush read caches\n");
- }
- DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
+ ((mask & DRM_BO_FLAG_MAPPABLE) ||
+ (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
+ return 0;
- return 0;
+ *res_mask = cur_flags;
+ return 1;
}
-static int drm_bo_new_flags(drm_device_t * dev,
- uint32_t flags, uint32_t new_mask, uint32_t hint,
- int init, uint32_t * n_flags, uint32_t * n_mask)
+int drm_bo_mem_space(drm_buffer_object_t * bo,
+ drm_bo_mem_reg_t * mem, int no_wait)
{
- uint32_t new_flags = 0;
- uint32_t new_props;
- drm_bo_driver_t *driver = dev->driver->bo_driver;
+ drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
- unsigned i;
+ drm_mem_type_manager_t *man;
+
+ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
+ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
+ uint32_t i;
+ uint32_t mem_type = DRM_BO_MEM_LOCAL;
+ uint32_t cur_flags;
+ int type_found = 0;
+ int type_ok = 0;
+ int has_eagain = 0;
+ drm_mm_node_t *node = NULL;
+ int ret;
- /*
- * First adjust the mask to take away nonexistant memory types.
- */
+ mem->mm_node = NULL;
+ for (i = 0; i < num_prios; ++i) {
+ mem_type = prios[i];
+ man = &bm->man[mem_type];
- for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
- if (!bm->use_type[i])
- new_mask &= ~drm_bo_type_flags(i);
- }
+ type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
+ &cur_flags);
- if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
- DRM_ERROR
- ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
- "processes\n");
- return -EPERM;
- }
- if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
- if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
- !driver->cached[DRM_BO_MEM_TT]) &&
- ((new_mask & DRM_BO_FLAG_MEM_VRAM)
- && !driver->cached[DRM_BO_MEM_VRAM])) {
- new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
- } else {
- if (!driver->cached[DRM_BO_MEM_TT])
- new_flags &= DRM_BO_FLAG_MEM_TT;
- if (!driver->cached[DRM_BO_MEM_VRAM])
- new_flags &= DRM_BO_FLAG_MEM_VRAM;
+ if (!type_ok)
+ continue;
+
+ if (mem_type == DRM_BO_MEM_LOCAL)
+ break;
+
+ if ((mem_type == bo->pinned_mem_type) &&
+ (bo->pinned_node != NULL)) {
+ node = bo->pinned_node;
+ break;
}
- }
- if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
- !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
- if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
- !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
- DRM_ERROR
- ("Cannot read cached from a pinned VRAM / TT buffer\n");
- return -EINVAL;
+ mutex_lock(&dev->struct_mutex);
+ if (man->has_type && man->use_type) {
+ type_found = 1;
+ node = drm_mm_search_free(&man->manager, mem->num_pages,
+ mem->page_alignment, 1);
+ if (node)
+ node = drm_mm_get_block(node, mem->num_pages,
+ mem->page_alignment);
}
+ mutex_unlock(&dev->struct_mutex);
+ if (node)
+ break;
}
- /*
- * Determine new memory location:
- */
+ if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
+ mem->mm_node = node;
+ mem->mem_type = mem_type;
+ mem->flags = cur_flags;
+ return 0;
+ }
- if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {
+ if (!type_found)
+ return -EINVAL;
- new_flags = new_mask & DRM_BO_MASK_MEM;
+ num_prios = dev->driver->bo_driver->num_mem_busy_prio;
+ prios = dev->driver->bo_driver->mem_busy_prio;
- if (!new_flags) {
- DRM_ERROR("Invalid buffer object memory flags\n");
- return -EINVAL;
- }
+ for (i = 0; i < num_prios; ++i) {
+ mem_type = prios[i];
+ man = &bm->man[mem_type];
- if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
- if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
- new_flags & (DRM_BO_FLAG_MEM_VRAM |
- DRM_BO_FLAG_MEM_TT)) {
- new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
- } else {
- new_flags = DRM_BO_FLAG_MEM_LOCAL;
- }
- }
- if (new_flags & DRM_BO_FLAG_MEM_TT) {
- if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
- new_flags & DRM_BO_FLAG_MEM_VRAM) {
- new_flags = DRM_BO_FLAG_MEM_VRAM;
- } else {
- new_flags = DRM_BO_FLAG_MEM_TT;
- }
+ if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
+ continue;
+
+ ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
+
+ if (ret == 0) {
+ mem->flags = cur_flags;
+ return 0;
}
- } else {
- new_flags = flags & DRM_BO_MASK_MEM;
+
+ if (ret == -EAGAIN)
+ has_eagain = 1;
+ }
+
+ ret = (has_eagain) ? -EAGAIN : -ENOMEM;
+ return ret;
+}
+
+EXPORT_SYMBOL(drm_bo_mem_space);
+
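drm_bo_mem_space() walks two driver-supplied tables: mem_type_prio lists
memory types in preferred placement order, and mem_busy_prio gives the order
in which eviction is attempted once nothing fits. A hedged sketch of the
corresponding drm_bo_driver_t fields, with invented table contents:

static uint32_t my_mem_prios[] = {
        DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
};
static uint32_t my_busy_prios[] = {
        DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
};

static drm_bo_driver_t my_bo_driver = {
        .mem_type_prio = my_mem_prios,
        .num_mem_type_prio = sizeof(my_mem_prios) / sizeof(my_mem_prios[0]),
        .mem_busy_prio = my_busy_prios,
        .num_mem_busy_prio = sizeof(my_busy_prios) / sizeof(my_busy_prios[0]),
        /* .create_ttm_backend_entry, .fence_type, .evict_mask, ... */
};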
+static int drm_bo_new_mask(drm_buffer_object_t * bo,
+ uint32_t new_mask, uint32_t hint)
+{
+ uint32_t new_props;
+
+ if (bo->type == drm_bo_type_user) {
+ DRM_ERROR("User buffers are not supported yet\n");
+ return -EINVAL;
+ }
+ if (bo->type == drm_bo_type_fake &&
+ !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
+ DRM_ERROR("Fake buffers must be pinned.\n");
+ return -EINVAL;
+ }
+
+ if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+ DRM_ERROR
+ ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
+ "processes\n");
+ return -EPERM;
}
new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
@@ -723,22 +882,7 @@ static int drm_bo_new_flags(drm_device_t * dev,
return -EINVAL;
}
- new_flags |= new_mask & ~DRM_BO_MASK_MEM;
-
- if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
- (new_flags & DRM_BO_FLAG_NO_EVICT) &&
- (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
- if (!(flags & DRM_BO_FLAG_CACHED)) {
- DRM_ERROR
- ("Cannot change caching policy of pinned buffer\n");
- return -EINVAL;
- } else {
- new_flags &= ~DRM_BO_FLAG_CACHED;
- }
- }
-
- *n_flags = new_flags;
- *n_mask = new_mask;
+ bo->mem.mask = new_mask;
return 0;
}
@@ -825,8 +969,8 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo)
int ret = 0;
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
- if (bo->mm_node)
- ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
+ if (bo->mem.mm_node)
+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
return ret;
}
@@ -916,21 +1060,15 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
drm_bo_arg_reply_t * rep)
{
rep->handle = bo->base.hash.key;
- rep->flags = bo->flags;
- rep->size = bo->num_pages * PAGE_SIZE;
+ rep->flags = bo->mem.flags;
+ rep->size = bo->mem.num_pages * PAGE_SIZE;
rep->offset = bo->offset;
-
- if (bo->ttm_object) {
- rep->arg_handle = bo->ttm_object->map_list.user_token;
- } else {
- rep->arg_handle = 0;
- }
-
- rep->mask = bo->mask;
+ rep->arg_handle = bo->map_list.user_token;
+ rep->mask = bo->mem.mask;
rep->buffer_start = bo->buffer_start;
rep->fence_flags = bo->fence_type;
rep->rep_flags = 0;
- rep->page_alignment = bo->page_alignment;
+ rep->page_alignment = bo->mem.page_alignment;
if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
@@ -988,14 +1126,14 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
}
if ((map_flags & DRM_BO_FLAG_READ) &&
- (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
- (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+ (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
+ (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
drm_bo_read_cached(bo);
}
break;
} else if ((map_flags & DRM_BO_FLAG_READ) &&
- (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
- (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+ (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
+ (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
/*
* We are already mapped with different flags.
@@ -1078,46 +1216,124 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv,
/*
* bo->mutex locked.
+ * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
*/
-static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
- int no_wait, int force_no_move)
+int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
+ int no_wait, int move_unfenced)
{
+ drm_device_t *dev = bo->dev;
+ drm_buffer_manager_t *bm = &dev->bm;
int ret = 0;
-
+ drm_bo_mem_reg_t mem;
/*
* Flush outstanding fences.
*/
+
drm_bo_busy(bo);
/*
- * Make sure we're not mapped.
+ * Wait for outstanding fences.
*/
- ret = drm_bo_wait_unmapped(bo, no_wait);
+ ret = drm_bo_wait(bo, 0, 0, no_wait);
if (ret)
return ret;
+ mem.num_pages = bo->mem.num_pages;
+ mem.size = mem.num_pages << PAGE_SHIFT;
+ mem.mask = new_mem_flags;
+ mem.page_alignment = bo->mem.page_alignment;
+
+ mutex_lock(&bm->evict_mutex);
+ mutex_lock(&dev->struct_mutex);
+ list_del(&bo->lru);
+ list_add_tail(&bo->lru, &bm->unfenced);
+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+ _DRM_BO_FLAG_UNFENCED);
+ mutex_unlock(&dev->struct_mutex);
+
/*
- * Wait for outstanding fences.
+ * Determine where to move the buffer.
*/
+ ret = drm_bo_mem_space(bo, &mem, no_wait);
+ if (ret)
+ goto out_unlock;
- ret = drm_bo_wait(bo, 0, 0, no_wait);
+ ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
- if (ret == -EINTR)
- return -EAGAIN;
- if (ret)
- return ret;
+ out_unlock:
+ if (ret || !move_unfenced) {
+ mutex_lock(&dev->struct_mutex);
+ if (mem.mm_node) {
+ if (mem.mm_node != bo->pinned_node)
+ drm_mm_put_block(mem.mm_node);
+ mem.mm_node = NULL;
+ }
+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+ DRM_WAKEUP(&bo->event_queue);
+ list_del(&bo->lru);
+ drm_bo_add_to_lru(bo);
+ mutex_unlock(&dev->struct_mutex);
+ }
- if (new_flags & DRM_BO_FLAG_MEM_TT) {
- ret = drm_move_local_to_tt(bo, no_wait);
- if (ret)
- return ret;
- } else {
- drm_move_tt_to_local(bo, 0, force_no_move);
+ mutex_unlock(&bm->evict_mutex);
+ return ret;
+}
+
+static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
+{
+ uint32_t flag_diff = (mem->mask ^ mem->flags);
+
+ if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
+ return 0;
+ if ((flag_diff & DRM_BO_FLAG_CACHED) &&
+ (!(mem->mask & DRM_BO_FLAG_CACHED) ||
+ (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
+ return 0;
+ }
+ if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
+ ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
+ (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+ return 0;
+ return 1;
+}
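drm_bo_mem_compat() above encodes the mask/flags split: mem->flags records
where the buffer actually sits, mem->mask what the caller will accept. A
hedged illustration with real flag names but an invented scenario:

static void mem_compat_example(drm_bo_mem_reg_t * mem)
{
        /* Buffer currently uncached in TT ... */
        mem->flags = DRM_BO_FLAG_MEM_TT;

        /* ... while the caller accepts TT or VRAM and prefers caching. */
        mem->mask = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
                    DRM_BO_FLAG_CACHED;
        /* drm_bo_mem_compat(mem) returns 1: the memory types overlap and
         * the cache mismatch is only a preference. */

        mem->mask |= DRM_BO_FLAG_FORCE_CACHING;
        /* Now drm_bo_mem_compat(mem) returns 0, so
         * drm_buffer_object_validate() would trigger a move. */
}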
+
+static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ drm_mem_type_manager_t *man;
+ uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
+ const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
+ uint32_t i;
+ int type_ok = 0;
+ uint32_t mem_type = 0;
+ uint32_t cur_flags;
+
+ if (drm_bo_mem_compat(mem))
+ return 0;
+
+ BUG_ON(mem->mm_node);
+
+ for (i = 0; i < num_prios; ++i) {
+ mem_type = prios[i];
+ man = &bm->man[mem_type];
+ type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
+ &cur_flags);
+ if (type_ok)
+ break;
}
- return 0;
+ if (type_ok) {
+ mem->mm_node = NULL;
+ mem->mem_type = mem_type;
+ mem->flags = cur_flags;
+ DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
+ return 0;
+ }
+
+ DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
+ return -EINVAL;
}
/*
@@ -1125,98 +1341,105 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
*/
static int drm_buffer_object_validate(drm_buffer_object_t * bo,
- uint32_t new_flags,
int move_unfenced, int no_wait)
{
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
- uint32_t flag_diff = (new_flags ^ bo->flags);
drm_bo_driver_t *driver = dev->driver->bo_driver;
-
int ret;
- if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
- DRM_ERROR("Vram support not implemented yet\n");
- return -EINVAL;
- }
-
- DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
- ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
+ DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
+ bo->mem.flags);
+ ret =
+ driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
if (ret) {
DRM_ERROR("Driver did not support given buffer permissions\n");
return ret;
}
+ ret = drm_bo_wait_unmapped(bo, no_wait);
+ if (ret)
+ return ret;
+
+ if (bo->type == drm_bo_type_fake) {
+ ret = drm_bo_check_fake(dev, &bo->mem);
+ if (ret)
+ return ret;
+ }
+
/*
- * Move out if we need to change caching policy.
+ * Check whether we need to move buffer.
*/
- if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
- !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
- if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
- DRM_ERROR("Cannot change caching policy of "
- "pinned buffer.\n");
- return -EINVAL;
- }
- ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
+ if (!drm_bo_mem_compat(&bo->mem)) {
+ ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+ move_unfenced);
if (ret) {
if (ret != -EAGAIN)
DRM_ERROR("Failed moving buffer.\n");
return ret;
}
}
- DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
- flag_diff = (new_flags ^ bo->flags);
/*
- * Check whether we dropped no_move policy, and in that case,
- * release reserved manager regions.
+ * Pinned buffers.
*/
- if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
- !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
+ if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+ bo->pinned_mem_type = bo->mem.mem_type;
mutex_lock(&dev->struct_mutex);
- if (bo->mm_node) {
- drm_mm_put_block(bo->mm_node);
- bo->mm_node = NULL;
+ list_del_init(&bo->pinned_lru);
+ drm_bo_add_to_pinned_lru(bo);
+
+ if (bo->pinned_node != bo->mem.mm_node) {
+ if (bo->pinned_node != NULL)
+ drm_mm_put_block(bo->pinned_node);
+ bo->pinned_node = bo->mem.mm_node;
}
+
mutex_unlock(&dev->struct_mutex);
+
+ } else if (bo->pinned_node != NULL) {
+
+ mutex_lock(&dev->struct_mutex);
+ drm_mm_put_block(bo->pinned_node);
+ list_del_init(&bo->pinned_lru);
+ bo->pinned_node = NULL;
+ mutex_unlock(&dev->struct_mutex);
+
}
/*
- * Check whether we need to move buffer.
+ * We might need to add a TTM.
*/
- if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
- ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
- if (ret) {
- if (ret != -EAGAIN)
- DRM_ERROR("Failed moving buffer.\n");
+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
+ ret = drm_bo_add_ttm(bo);
+ if (ret)
return ret;
- }
}
+ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
- if (move_unfenced) {
-
- /*
- * Place on unfenced list.
- */
+ /*
+ * Finally, adjust lru to be sure.
+ */
+ mutex_lock(&dev->struct_mutex);
+ list_del(&bo->lru);
+ if (move_unfenced) {
+ list_add_tail(&bo->lru, &bm->unfenced);
DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
_DRM_BO_FLAG_UNFENCED);
- mutex_lock(&dev->struct_mutex);
- list_del(&bo->lru);
- list_add_tail(&bo->lru, &bm->unfenced);
- mutex_unlock(&dev->struct_mutex);
} else {
-
- mutex_lock(&dev->struct_mutex);
- list_del_init(&bo->lru);
- drm_bo_add_to_lru(bo, bm);
- mutex_unlock(&dev->struct_mutex);
+ drm_bo_add_to_lru(bo);
+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+ DRM_WAKEUP(&bo->event_queue);
+ DRM_FLAG_MASKED(bo->priv_flags, 0,
+ _DRM_BO_FLAG_UNFENCED);
+ }
}
+ mutex_unlock(&dev->struct_mutex);
- bo->flags = new_flags;
return 0;
}
@@ -1225,10 +1448,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
drm_bo_arg_reply_t * rep)
{
drm_buffer_object_t *bo;
- drm_device_t *dev = priv->head->dev;
int ret;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
- uint32_t new_flags;
bo = drm_lookup_buffer_object(priv, handle, 1);
if (!bo) {
@@ -1241,22 +1462,20 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
if (ret)
goto out;
- ret = drm_bo_new_flags(dev, bo->flags,
- (flags & mask) | (bo->mask & ~mask), hint,
- 0, &new_flags, &bo->mask);
-
+ DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
+ ret = drm_bo_new_mask(bo, flags, hint);
if (ret)
goto out;
ret =
- drm_buffer_object_validate(bo, new_flags,
- !(hint & DRM_BO_HINT_DONT_FENCE),
+ drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
no_wait);
drm_bo_fill_rep_arg(bo, rep);
out:
mutex_unlock(&bo->mutex);
+
drm_bo_usage_deref_unlocked(bo);
return ret;
}
@@ -1307,90 +1526,6 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
return ret;
}
-/*
- * Call bo->mutex locked.
- */
-
-static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
-{
- drm_device_t *dev = bo->dev;
- drm_ttm_object_t *to = NULL;
- int ret = 0;
- uint32_t ttm_flags = 0;
-
- bo->ttm_object = NULL;
- bo->ttm = NULL;
-
- switch (bo->type) {
- case drm_bo_type_dc:
- mutex_lock(&dev->struct_mutex);
- ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
- ttm_flags, &to);
- mutex_unlock(&dev->struct_mutex);
- break;
- case drm_bo_type_user:
- case drm_bo_type_fake:
- break;
- default:
- DRM_ERROR("Illegal buffer object type\n");
- ret = -EINVAL;
- break;
- }
-
- if (ret) {
- return ret;
- }
-
- if (to) {
- bo->ttm_object = to;
- bo->ttm = drm_ttm_from_object(to);
- }
- return ret;
-}
-
-/*
- * Transfer a buffer object's memory and LRU status to a newly
- * created object. User-space references remains with the old
- * object. Call bo->mutex locked.
- */
-
-int drm_buffer_object_transfer(drm_buffer_object_t *bo,
- drm_buffer_object_t **new_obj)
-{
- drm_buffer_object_t *fbo;
- drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
-
- fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
- if (!fbo)
- return -ENOMEM;
-
- *fbo = *bo;
- mutex_init(&fbo->mutex);
- mutex_lock(&fbo->mutex);
- mutex_lock(&dev->struct_mutex);
-
- INIT_LIST_HEAD(&fbo->ddestroy);
- INIT_LIST_HEAD(&fbo->lru);
- list_splice_init(&bo->lru, &fbo->lru);
-
- bo->mm_node = NULL;
- bo->ttm = NULL;
- bo->ttm_object = NULL;
- bo->fence = NULL;
- bo->flags = 0;
-
- fbo->mm_node->private = (void *)fbo;
- atomic_set(&fbo->usage, 1);
- atomic_inc(&bm->count);
- mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&fbo->mutex);
-
- *new_obj = fbo;
- return 0;
-}
-
-
int drm_buffer_object_create(drm_file_t * priv,
unsigned long size,
drm_bo_type_t type,
@@ -1404,7 +1539,6 @@ int drm_buffer_object_create(drm_file_t * priv,
drm_buffer_manager_t *bm = &dev->bm;
drm_buffer_object_t *bo;
int ret = 0;
- uint32_t new_flags;
unsigned long num_pages;
if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
@@ -1429,12 +1563,18 @@ int drm_buffer_object_create(drm_file_t * priv,
atomic_set(&bo->mapped, -1);
DRM_INIT_WAITQUEUE(&bo->event_queue);
INIT_LIST_HEAD(&bo->lru);
+ INIT_LIST_HEAD(&bo->pinned_lru);
INIT_LIST_HEAD(&bo->ddestroy);
+#ifdef DRM_ODD_MM_COMPAT
+ INIT_LIST_HEAD(&bo->p_mm_list);
+ INIT_LIST_HEAD(&bo->vma_list);
+#endif
bo->dev = dev;
bo->type = type;
- bo->num_pages = num_pages;
- bo->mm_node = NULL;
- bo->page_alignment = page_alignment;
+ bo->mem.mem_type = DRM_BO_MEM_LOCAL;
+ bo->mem.num_pages = num_pages;
+ bo->mem.mm_node = NULL;
+ bo->mem.page_alignment = page_alignment;
if (bo->type == drm_bo_type_fake) {
bo->offset = buffer_start;
bo->buffer_start = 0;
@@ -1442,18 +1582,22 @@ int drm_buffer_object_create(drm_file_t * priv,
bo->buffer_start = buffer_start;
}
bo->priv_flags = 0;
- bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+ bo->mem.flags = 0;
+ bo->mem.mask = 0;
atomic_inc(&bm->count);
- ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
- 1, &new_flags, &bo->mask);
- if (ret)
- goto out_err;
- ret = drm_bo_add_ttm(priv, bo);
+ ret = drm_bo_new_mask(bo, mask, hint);
+
if (ret)
goto out_err;
- ret = drm_buffer_object_validate(bo, new_flags, 0,
- hint & DRM_BO_HINT_DONT_BLOCK);
+ if (bo->type == drm_bo_type_dc) {
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_bo_setup_vm_locked(bo);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ goto out_err;
+ }
+ ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
if (ret)
goto out_err;
@@ -1463,6 +1607,7 @@ int drm_buffer_object_create(drm_file_t * priv,
out_err:
mutex_unlock(&bo->mutex);
+
drm_bo_usage_deref_unlocked(bo);
return ret;
}
@@ -1630,111 +1775,174 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
return 0;
}
+/**
+ * Clean the unfenced list and put on regular LRU.
+ * This is part of the memory manager cleanup and should only be
+ * called with the DRI lock held.
+ * Call dev->struct_mutex locked.
+ */
+
+static void drm_bo_clean_unfenced(drm_device_t *dev)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ struct list_head *head, *list;
+ drm_buffer_object_t *entry;
+
+ head = &bm->unfenced;
+
+ list = head->next;
+ while(list != head) {
+ prefetch(list->next);
+ entry = list_entry(list, drm_buffer_object_t, lru);
+
+ atomic_inc(&entry->usage);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&entry->mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ list_del(&entry->lru);
+ DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+ drm_bo_add_to_lru(entry);
+ mutex_unlock(&entry->mutex);
+ list = head->next;
+ }
+}
+
+static int drm_bo_leave_list(drm_buffer_object_t * bo,
+ uint32_t mem_type,
+ int free_pinned, int allow_errors)
+{
+ drm_device_t *dev = bo->dev;
+ int ret = 0;
+
+ mutex_lock(&bo->mutex);
+
+ ret = drm_bo_expire_fence(bo, allow_errors);
+ if (ret)
+ goto out;
+
+ if (free_pinned) {
+ DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
+ mutex_lock(&dev->struct_mutex);
+ list_del_init(&bo->pinned_lru);
+ if (bo->pinned_node == bo->mem.mm_node)
+ bo->pinned_node = NULL;
+ if (bo->pinned_node != NULL) {
+ drm_mm_put_block(bo->pinned_node);
+ bo->pinned_node = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
+ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
+ "cleanup. Removing flag and evicting.\n");
+ bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
+ bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+ }
+
+ if (bo->mem.mem_type == mem_type)
+ ret = drm_bo_evict(bo, mem_type, 0);
+
+ if (ret) {
+ if (allow_errors) {
+ goto out;
+ } else {
+ ret = 0;
+ DRM_ERROR("Cleanup eviction failed\n");
+ }
+ }
+
+ out:
+ mutex_unlock(&bo->mutex);
+ return ret;
+}
+
+
+static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
+ int pinned_list)
+{
+ if (pinned_list)
+ return list_entry(list, drm_buffer_object_t, pinned_lru);
+ else
+ return list_entry(list, drm_buffer_object_t, lru);
+}
+
/*
- * dev->struct_sem locked.
+ * dev->struct_mutex locked.
*/
static int drm_bo_force_list_clean(drm_device_t * dev,
struct list_head *head,
unsigned mem_type,
- int force_no_move, int allow_errors)
+ int free_pinned,
+ int allow_errors,
+ int pinned_list)
{
- drm_buffer_manager_t *bm = &dev->bm;
struct list_head *list, *next, *prev;
- drm_buffer_object_t *entry;
+ drm_buffer_object_t *entry, *nentry;
int ret;
- int clean;
+ int do_restart;
- retry:
- clean = 1;
+ /*
+ * The list traversal is a bit odd here, because an item may
+ * disappear from the list when we release the struct_mutex or
+ * when we decrease the usage count. Also we're not guaranteed
+ * to drain pinned lists, so we can't always restart.
+ */
+
+restart:
+ nentry = NULL;
list_for_each_safe(list, next, head) {
prev = list->prev;
- entry = list_entry(list, drm_buffer_object_t, lru);
+
+ entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
atomic_inc(&entry->usage);
- mutex_unlock(&dev->struct_mutex);
- mutex_lock(&entry->mutex);
- mutex_lock(&dev->struct_mutex);
+ if (nentry) {
+ atomic_dec(&nentry->usage);
+ nentry = NULL;
+ }
- if (prev != list->prev || next != list->next) {
- mutex_unlock(&entry->mutex);
- drm_bo_usage_deref_locked(entry);
- goto retry;
+ /*
+ * Protect the next item from destruction, so we can check
+ * its list pointers later on.
+ */
+
+ if (next != head) {
+ nentry = drm_bo_entry(next, pinned_list);
+ atomic_inc(&nentry->usage);
}
- if (entry->mm_node) {
- clean = 0;
+ mutex_unlock(&dev->struct_mutex);
- /*
- * Expire the fence.
- */
+ ret = drm_bo_leave_list(entry, mem_type, free_pinned,
+ allow_errors);
+ mutex_lock(&dev->struct_mutex);
- mutex_unlock(&dev->struct_mutex);
- if (entry->fence && bm->nice_mode) {
- unsigned long _end = jiffies + 3 * DRM_HZ;
- do {
- ret = drm_bo_wait(entry, 0, 1, 0);
- if (ret && allow_errors) {
- if (ret == -EINTR)
- ret = -EAGAIN;
- goto out_err;
- }
- } while (ret && !time_after_eq(jiffies, _end));
-
- if (entry->fence) {
- bm->nice_mode = 0;
- DRM_ERROR("Detected GPU hang or "
- "fence manager was taken down. "
- "Evicting waiting buffers\n");
- }
- }
- if (entry->fence) {
- drm_fence_usage_deref_unlocked(dev,
- entry->fence);
- entry->fence = NULL;
- }
+ drm_bo_usage_deref_locked(entry);
+ if (ret)
+ return ret;
- DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
- 0);
+ /*
+ * Has the next item disappeared from the list?
+ */
- if (force_no_move) {
- DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
- 0);
- }
- if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
- DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
- "cleanup. Removing flag and evicting.\n");
- entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
- entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
- }
+ do_restart = ((next->prev != list) && (next->prev != prev));
- ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
- if (ret) {
- if (allow_errors) {
- goto out_err;
- } else {
- DRM_ERROR("Aargh. Eviction failed.\n");
- }
- }
- mutex_lock(&dev->struct_mutex);
- }
- mutex_unlock(&entry->mutex);
- drm_bo_usage_deref_locked(entry);
- if (prev != list->prev || next != list->next) {
- goto retry;
+ if (nentry != NULL && do_restart) {
+ drm_bo_usage_deref_locked(nentry);
+ nentry = NULL;
}
+
+ if (do_restart)
+ goto restart;
}
- if (!clean)
- goto retry;
return 0;
- out_err:
- mutex_unlock(&entry->mutex);
- drm_bo_usage_deref_unlocked(entry);
- mutex_lock(&dev->struct_mutex);
- return ret;
}
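
For readers, the traversal above boils down to the following reference-protected pattern. This is an illustrative condensation (declarations elided); the real code additionally pins the next entry so it can detect concurrent list changes and restart.

/*
 * Illustrative condensation of drm_bo_force_list_clean().
 */
list_for_each_safe(list, next, head) {
        entry = drm_bo_entry(list, pinned_list);
        atomic_inc(&entry->usage);        /* entry cannot be destroyed now */
        mutex_unlock(&dev->struct_mutex); /* drm_bo_leave_list() may sleep */
        ret = drm_bo_leave_list(entry, mem_type, free_pinned, allow_errors);
        mutex_lock(&dev->struct_mutex);
        drm_bo_usage_deref_locked(entry); /* may free 'entry' */
        if (ret)
                return ret;
}
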
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
drm_buffer_manager_t *bm = &dev->bm;
+ drm_mem_type_manager_t *man = &bm->man[mem_type];
int ret = -EINVAL;
if (mem_type >= DRM_BO_MEM_TYPES) {
@@ -1742,36 +1950,23 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
return ret;
}
- if (!bm->has_type[mem_type]) {
+ if (!man->has_type) {
DRM_ERROR("Trying to take down uninitialized "
"memory manager type\n");
return ret;
}
- bm->use_type[mem_type] = 0;
- bm->has_type[mem_type] = 0;
+ man->use_type = 0;
+ man->has_type = 0;
ret = 0;
if (mem_type > 0) {
- /*
- * Throw out unfenced buffers.
- */
-
- drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);
+ drm_bo_clean_unfenced(dev);
+ drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
+ drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
- /*
- * Throw out evicted no-move buffers.
- */
-
- drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
- mem_type, 1, 0);
- drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
- 0);
- drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
- 0);
-
- if (drm_mm_clean(&bm->manager[mem_type])) {
- drm_mm_takedown(&bm->manager[mem_type]);
+ if (drm_mm_clean(&man->manager)) {
+ drm_mm_takedown(&man->manager);
} else {
ret = -EBUSY;
}
@@ -1780,62 +1975,75 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
return ret;
}
+/**
+ * Evict all buffers of a particular mem_type, but leave memory manager
+ * regions for NO_MOVE buffers intact. New buffers cannot be added at this
+ * point since we have the hardware lock.
+ */
+
static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
int ret;
drm_buffer_manager_t *bm = &dev->bm;
+ drm_mem_type_manager_t *man = &bm->man[mem_type];
if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type);
return -EINVAL;
}
- ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
+ drm_bo_clean_unfenced(dev);
+ ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
if (ret)
return ret;
- ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
- if (ret)
- return ret;
- ret =
- drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
+ ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
+
return ret;
}
-static int drm_bo_init_mm(drm_device_t * dev,
- unsigned type,
- unsigned long p_offset, unsigned long p_size)
+int drm_bo_init_mm(drm_device_t * dev,
+ unsigned type,
+ unsigned long p_offset, unsigned long p_size)
{
drm_buffer_manager_t *bm = &dev->bm;
int ret = -EINVAL;
+ drm_mem_type_manager_t *man;
if (type >= DRM_BO_MEM_TYPES) {
DRM_ERROR("Illegal memory type %d\n", type);
return ret;
}
- if (bm->has_type[type]) {
+
+ man = &bm->man[type];
+ if (man->has_type) {
DRM_ERROR("Memory manager already initialized for type %d\n",
type);
return ret;
}
+ ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
+ if (ret)
+ return ret;
+
ret = 0;
if (type != DRM_BO_MEM_LOCAL) {
if (!p_size) {
DRM_ERROR("Zero size memory manager type %d\n", type);
return ret;
}
- ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
+ ret = drm_mm_init(&man->manager, p_offset, p_size);
if (ret)
return ret;
}
- bm->has_type[type] = 1;
- bm->use_type[type] = 1;
+ man->has_type = 1;
+ man->use_type = 1;
- INIT_LIST_HEAD(&bm->lru[type]);
- INIT_LIST_HEAD(&bm->pinned[type]);
+ INIT_LIST_HEAD(&man->lru);
+ INIT_LIST_HEAD(&man->pinned);
return 0;
}
+EXPORT_SYMBOL(drm_bo_init_mm);
/*
* This is called from lastclose, so we don't need to bother about
@@ -1847,6 +2055,7 @@ int drm_bo_driver_finish(drm_device_t * dev)
drm_buffer_manager_t *bm = &dev->bm;
int ret = 0;
unsigned i = DRM_BO_MEM_TYPES;
+ drm_mem_type_manager_t *man;
mutex_lock(&dev->bm.init_mutex);
mutex_lock(&dev->struct_mutex);
@@ -1856,17 +2065,19 @@ int drm_bo_driver_finish(drm_device_t * dev)
bm->initialized = 0;
while (i--) {
- if (bm->has_type[i]) {
- bm->use_type[i] = 0;
+ man = &bm->man[i];
+ if (man->has_type) {
+ man->use_type = 0;
if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
ret = -EBUSY;
DRM_ERROR("DRM memory manager type %d "
"is not clean.\n", i);
}
- bm->has_type[i] = 0;
+ man->has_type = 0;
}
}
mutex_unlock(&dev->struct_mutex);
+
if (!cancel_delayed_work(&bm->wq)) {
flush_scheduled_work();
}
@@ -1875,10 +2086,10 @@ int drm_bo_driver_finish(drm_device_t * dev)
if (list_empty(&bm->ddestroy)) {
DRM_DEBUG("Delayed destroy list was clean\n");
}
- if (list_empty(&bm->lru[0])) {
+ if (list_empty(&bm->man[0].lru)) {
DRM_DEBUG("Swap list was clean\n");
}
- if (list_empty(&bm->pinned[0])) {
+ if (list_empty(&bm->man[0].pinned)) {
DRM_DEBUG("NO_MOVE list was clean\n");
}
if (list_empty(&bm->unfenced)) {
@@ -2006,3 +2217,144 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return 0;
}
+
+/*
+ * buffer object vm functions.
+ */
+
+int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+
+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
+ if (mem->mem_type == DRM_BO_MEM_LOCAL)
+ return 0;
+
+ if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
+ return 0;
+
+ if (mem->flags & DRM_BO_FLAG_CACHED)
+ return 0;
+ }
+ return 1;
+}
+
+EXPORT_SYMBOL(drm_mem_reg_is_pci);
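
As a usage sketch, a CPU-mapping path can use drm_mem_reg_is_pci() to choose between aperture pages and TTM pages. This is illustrative only, with declarations and locking elided; compare the compat nopage handler later in this commit.

if (drm_mem_reg_is_pci(dev, &bo->mem)) {
        /* Fixed or uncached memory: resolve pages through the aperture. */
        ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);
} else {
        /* Cached system memory: fault in individual TTM pages. */
        page = drm_ttm_get_page(bo->ttm, page_offset);
}
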
+
+/**
+ * Get the PCI offset for the buffer object memory.
+ *
+ * \param dev The drm device.
+ * \param mem The buffer object memory region.
+ * \param bus_base On return the base of the PCI region.
+ * \param bus_offset On return the byte offset into the PCI region.
+ * \param bus_size On return the byte size of the buffer object or zero if
+ * the buffer object memory is not accessible through a PCI region.
+ * \return Failure indication.
+ *
+ * Returns -EINVAL if the buffer object is currently not mappable.
+ * Otherwise returns zero.
+ */
+
+int drm_bo_pci_offset(drm_device_t * dev,
+ drm_bo_mem_reg_t * mem,
+ unsigned long *bus_base,
+ unsigned long *bus_offset, unsigned long *bus_size)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+
+ *bus_size = 0;
+ if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
+ return -EINVAL;
+
+ if (drm_mem_reg_is_pci(dev, mem)) {
+ *bus_offset = mem->mm_node->start << PAGE_SHIFT;
+ *bus_size = mem->num_pages << PAGE_SHIFT;
+ *bus_base = man->io_offset;
+ }
+
+ return 0;
+}
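
A typical caller turns the returned triple into a page frame number, exactly as drm_bo_map_bound() does later in this commit. A minimal sketch, with the surrounding declarations elided:

unsigned long bus_base, bus_offset, bus_size, pfn;
int ret;

ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, &bus_size);
if (ret || bus_size == 0)
        return ret;     /* not PCI-accessible; use the TTM pages instead */

/* Page frame number of the first buffer page within the aperture. */
pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
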
+
+/**
+ * Kill all user-space virtual mappings of this buffer object.
+ *
+ * \param bo The buffer object.
+ *
+ * Call bo->mutex locked.
+ */
+
+void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
+{
+ drm_device_t *dev = bo->dev;
+ loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
+ loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
+
+ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
+}
+
+static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
+{
+ drm_map_list_t *list = &bo->map_list;
+ drm_local_map_t *map;
+ drm_device_t *dev = bo->dev;
+
+ if (list->user_token) {
+ drm_ht_remove_item(&dev->map_hash, &list->hash);
+ list->user_token = 0;
+ }
+ if (list->file_offset_node) {
+ drm_mm_put_block(list->file_offset_node);
+ list->file_offset_node = NULL;
+ }
+
+ map = list->map;
+ if (!map)
+ return;
+
+ drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
+ list->map = NULL;
+ list->user_token = 0ULL;
+ drm_bo_usage_deref_locked(bo);
+}
+
+static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
+{
+ drm_map_list_t *list = &bo->map_list;
+ drm_local_map_t *map;
+ drm_device_t *dev = bo->dev;
+
+ list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
+ if (!list->map)
+ return -ENOMEM;
+
+ map = list->map;
+ map->offset = 0;
+ map->type = _DRM_TTM;
+ map->flags = _DRM_REMOVABLE;
+ map->size = bo->mem.num_pages * PAGE_SIZE;
+ atomic_inc(&bo->usage);
+ map->handle = (void *)bo;
+
+ list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
+ bo->mem.num_pages, 0, 0);
+
+ if (!list->file_offset_node) {
+ drm_bo_takedown_vm_locked(bo);
+ return -ENOMEM;
+ }
+
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ bo->mem.num_pages, 0);
+
+ list->hash.key = list->file_offset_node->start;
+ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
+ drm_bo_takedown_vm_locked(bo);
+ return -ENOMEM;
+ }
+
+ list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
+
+ return 0;
+}
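
The user_token set up above is the fake file offset that user space passes to mmap() on the DRM device node. A hypothetical user-space sketch, where bo_size, drm_fd and token are placeholders and <sys/mman.h> is assumed:

/* User-space side, illustrative only. */
void *virt = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
                  MAP_SHARED, drm_fd, (off_t) token);
if (virt == MAP_FAILED)
        perror("mmap");
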
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
new file mode 100644
index 00000000..4f752065
--- /dev/null
+++ b/linux-core/drm_bo_move.c
@@ -0,0 +1,411 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+/**
+ * Free the old memory node unless it's a pinned region and we
+ * have not also been asked to free pinned regions.
+ */
+
+static void drm_bo_free_old_node(drm_buffer_object_t * bo)
+{
+ drm_bo_mem_reg_t *old_mem = &bo->mem;
+
+ if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
+ mutex_lock(&bo->dev->struct_mutex);
+ drm_mm_put_block(old_mem->mm_node);
+ old_mem->mm_node = NULL;
+ mutex_unlock(&bo->dev->struct_mutex);
+ }
+ old_mem->mm_node = NULL;
+}
+
+int drm_bo_move_ttm(drm_buffer_object_t * bo,
+ int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+ drm_ttm_t *ttm = bo->ttm;
+ drm_bo_mem_reg_t *old_mem = &bo->mem;
+ uint32_t save_flags = old_mem->flags;
+ uint32_t save_mask = old_mem->mask;
+ int ret;
+
+ if (old_mem->mem_type == DRM_BO_MEM_TT) {
+ if (evict)
+ drm_ttm_evict(ttm);
+ else
+ drm_ttm_unbind(ttm);
+
+ drm_bo_free_old_node(bo);
+ DRM_FLAG_MASKED(old_mem->flags,
+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
+ DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
+ old_mem->mem_type = DRM_BO_MEM_LOCAL;
+ save_flags = old_mem->flags;
+ }
+ if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
+ ret = drm_bind_ttm(ttm,
+ new_mem->flags & DRM_BO_FLAG_CACHED,
+ new_mem->mm_node->start);
+ if (ret)
+ return ret;
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+ old_mem->mask = save_mask;
+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+ return 0;
+}
+
+EXPORT_SYMBOL(drm_bo_move_ttm);
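
A driver's move() hook would typically dispatch between the helpers in this file. The sketch below is illustrative, assumes only the LOCAL and TT memory types are in use, and assumes no hardware blitter; compare the i915 and via buffer code added elsewhere in this commit.

/* Illustrative move() hook built on the helpers in this file. */
static int example_bo_move(drm_buffer_object_t * bo,
                           int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
        drm_bo_mem_reg_t *old_mem = &bo->mem;

        if (old_mem->mem_type == DRM_BO_MEM_TT ||
            new_mem->mem_type == DRM_BO_MEM_TT) {
                /* system <-> TT: only (un)binding of the TTM is needed;
                 * the backing pages themselves do not move. */
                return drm_bo_move_ttm(bo, evict, no_wait, new_mem);
        }

        /* Fixed memory with no blitter: fall back to a CPU copy. */
        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
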
+
+/**
+ * Return a kernel virtual address to the buffer object PCI memory.
+ *
+ * \param dev The drm device.
+ * \param mem The buffer object memory region.
+ * \param virtual On return, the kernel virtual address.
+ * \return Failure indication.
+ *
+ * Returns -EINVAL if the buffer object is currently not mappable.
+ * Returns -ENOMEM if the ioremap operation failed.
+ * Otherwise returns zero.
+ *
+ * After a successful call, *virtual contains the virtual address, or NULL
+ * if the buffer object content is not accessible through PCI space.
+ * Call bo->mutex locked.
+ */
+
+int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
+ void **virtual)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+ unsigned long bus_offset;
+ unsigned long bus_size;
+ unsigned long bus_base;
+ int ret;
+ void *addr;
+
+ *virtual = NULL;
+ ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
+ if (ret || bus_size == 0)
+ return ret;
+
+ if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
+ addr = (void *)(((u8 *) man->io_addr) + bus_offset);
+ else {
+ addr = ioremap_nocache(bus_base + bus_offset, bus_size);
+ if (!addr)
+ return -ENOMEM;
+ }
+ *virtual = addr;
+ return 0;
+}
+
+/**
+ * Unmap a mapping obtained using drm_mem_reg_ioremap.
+ *
+ * \param dev The drm device.
+ * \param mem The buffer object memory region.
+ * \param virtual The virtual address obtained from drm_mem_reg_ioremap.
+ *
+ * Call bo->mutex locked.
+ */
+
+void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
+ void *virtual)
+{
+ drm_buffer_manager_t *bm;
+ drm_mem_type_manager_t *man;
+
+ bm = &dev->bm;
+ man = &bm->man[mem->mem_type];
+
+ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+ iounmap(virtual);
+ }
+}
+
+static int drm_copy_io_page(void *dst, void *src, unsigned long page)
+{
+ uint32_t *dstP =
+ (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
+ uint32_t *srcP =
+ (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
+
+ int i;
+ for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
+ iowrite32(ioread32(srcP++), dstP++);
+ return 0;
+}
+
+static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
+{
+ struct page *d = drm_ttm_get_page(ttm, page);
+ void *dst;
+
+ if (!d)
+ return -ENOMEM;
+
+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+ dst = kmap(d);
+ if (!dst)
+ return -ENOMEM;
+
+ memcpy_fromio(dst, src, PAGE_SIZE);
+ kunmap(d);
+ return 0;
+}
+
+static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
+{
+ struct page *s = drm_ttm_get_page(ttm, page);
+ void *src;
+
+ if (!s)
+ return -ENOMEM;
+
+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+ src = kmap(s);
+ if (!src)
+ return -ENOMEM;
+
+ memcpy_toio(dst, src, PAGE_SIZE);
+ kunmap(s);
+ return 0;
+}
+
+int drm_bo_move_memcpy(drm_buffer_object_t * bo,
+ int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+ drm_device_t *dev = bo->dev;
+ drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
+ drm_ttm_t *ttm = bo->ttm;
+ drm_bo_mem_reg_t *old_mem = &bo->mem;
+ drm_bo_mem_reg_t old_copy = *old_mem;
+ void *old_iomap;
+ void *new_iomap;
+ int ret;
+ uint32_t save_flags = old_mem->flags;
+ uint32_t save_mask = old_mem->mask;
+ unsigned long i;
+ unsigned long page;
+ unsigned long add = 0;
+ int dir;
+
+ ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
+ if (ret)
+ return ret;
+ ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
+ if (ret)
+ goto out;
+
+ if (old_iomap == NULL && new_iomap == NULL)
+ goto out2;
+ if (old_iomap == NULL && ttm == NULL)
+ goto out2;
+
+ add = 0;
+ dir = 1;
+
+ if ((old_mem->mem_type == new_mem->mem_type) &&
+ (new_mem->mm_node->start <
+ old_mem->mm_node->start + old_mem->mm_node->size)) {
+ dir = -1;
+ add = new_mem->num_pages - 1;
+ }
+
+ for (i = 0; i < new_mem->num_pages; ++i) {
+ page = i * dir + add;
+ if (old_iomap == NULL)
+ ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
+ else if (new_iomap == NULL)
+ ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
+ else
+ ret = drm_copy_io_page(new_iomap, old_iomap, page);
+ if (ret)
+ goto out1;
+ }
+ mb();
+ out2:
+ drm_bo_free_old_node(bo);
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+ old_mem->mask = save_mask;
+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+
+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
+ drm_ttm_unbind(ttm);
+ drm_destroy_ttm(ttm);
+ bo->ttm = NULL;
+ }
+
+ out1:
+ drm_mem_reg_iounmap(dev, new_mem, new_iomap);
+ out:
+ drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
+ return ret;
+}
+
+EXPORT_SYMBOL(drm_bo_move_memcpy);
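
The backwards-copy case above is worth a worked example:

/*
 * Moving a 4-page buffer from aperture page 0 to page 2 within the same
 * memory type: new->start (2) < old->start + old->size (4), so dir = -1
 * and add = num_pages - 1 = 3. "page = i * dir + add" then visits pages
 * 3, 2, 1, 0, copying the highest page first so the overlapping,
 * not-yet-copied source pages are never overwritten.
 */
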
+
+/*
+ * Transfer a buffer object's memory and LRU status to a newly
+ * created object. User-space references remain with the old
+ * object. Call bo->mutex locked.
+ */
+
+int drm_buffer_object_transfer(drm_buffer_object_t * bo,
+ drm_buffer_object_t ** new_obj)
+{
+ drm_buffer_object_t *fbo;
+ drm_device_t *dev = bo->dev;
+ drm_buffer_manager_t *bm = &dev->bm;
+
+ fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
+ if (!fbo)
+ return -ENOMEM;
+
+ *fbo = *bo;
+ mutex_init(&fbo->mutex);
+ mutex_lock(&fbo->mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ DRM_INIT_WAITQUEUE(&bo->event_queue);
+ INIT_LIST_HEAD(&fbo->ddestroy);
+ INIT_LIST_HEAD(&fbo->lru);
+ INIT_LIST_HEAD(&fbo->pinned_lru);
+#ifdef DRM_ODD_MM_COMPAT
+ INIT_LIST_HEAD(&fbo->vma_list);
+ INIT_LIST_HEAD(&fbo->p_mm_list);
+#endif
+
+ atomic_inc(&bo->fence->usage);
+ fbo->pinned_node = NULL;
+ fbo->mem.mm_node->private = (void *)fbo;
+ atomic_set(&fbo->usage, 1);
+ atomic_inc(&bm->count);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&fbo->mutex);
+
+ *new_obj = fbo;
+ return 0;
+}
+
+/*
+ * Since a move is underway, we need to block signals in this function.
+ * We cannot restart until it has finished.
+ */
+
+int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
+ int evict,
+ int no_wait,
+ uint32_t fence_class,
+ uint32_t fence_type,
+ uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
+{
+ drm_device_t *dev = bo->dev;
+ drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
+ drm_bo_mem_reg_t *old_mem = &bo->mem;
+ int ret;
+ uint32_t save_flags = old_mem->flags;
+ uint32_t save_mask = old_mem->mask;
+ drm_buffer_object_t *old_obj;
+
+ if (bo->fence)
+ drm_fence_usage_deref_unlocked(dev, bo->fence);
+ ret = drm_fence_object_create(dev, fence_class, fence_type,
+ fence_flags | DRM_FENCE_FLAG_EMIT,
+ &bo->fence);
+ if (ret)
+ return ret;
+
+#ifdef DRM_ODD_MM_COMPAT
+ /*
+ * In this mode, we don't allow pipelining a copy blit,
+ * since the buffer will be accessible from user space
+ * the moment we return and rebuild the page tables.
+ *
+ * With normal vm operation, page tables are rebuilt
+ * on demand using fault(), which waits for buffer idle.
+ */
+ if (1)
+#else
+ if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
+ bo->mem.mm_node != NULL))
+#endif
+ {
+ ret = drm_bo_wait(bo, 0, 1, 0);
+ if (ret)
+ return ret;
+
+ drm_bo_free_old_node(bo);
+
+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
+ drm_ttm_unbind(bo->ttm);
+ drm_destroy_ttm(bo->ttm);
+ bo->ttm = NULL;
+ }
+ } else {
+
+ /* This should help pipeline ordinary buffer moves.
+ *
+ * Hang old buffer memory on a new buffer object,
+ * and leave it to be released when the GPU
+ * operation has completed.
+ */
+
+ ret = drm_buffer_object_transfer(bo, &old_obj);
+
+ if (ret)
+ return ret;
+
+ if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
+ old_obj->ttm = NULL;
+ else
+ bo->ttm = NULL;
+
+ mutex_lock(&dev->struct_mutex);
+ list_del_init(&old_obj->lru);
+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+ drm_bo_add_to_lru(old_obj);
+
+ drm_bo_usage_deref_locked(old_obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ }
+
+ *old_mem = *new_mem;
+ new_mem->mm_node = NULL;
+ old_mem->mask = save_mask;
+ DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
+ return 0;
+}
+
+EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
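
For context, a hardware-accelerated move would emit a copy blit and then hand cleanup to this helper. A sketch in which example_emit_copy_blit() is hypothetical:

static int example_move_blit(drm_buffer_object_t * bo,
                             int evict, int no_wait,
                             drm_bo_mem_reg_t * new_mem)
{
        int ret;

        ret = example_emit_copy_blit(bo->dev, &bo->mem, new_mem); /* hypothetical */
        if (ret)
                return ret;

        /* Fence the move; the old memory node (and possibly the TTM)
         * is released once the blit fence signals. */
        return drm_bo_move_accel_cleanup(bo, evict, no_wait,
                                         0 /* fence class */,
                                         DRM_FENCE_TYPE_EXE, 0 /* flags */,
                                         new_mem);
}
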
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 6bb58424..4825f0c0 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -79,54 +79,14 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
/*
- * vm code for kernels below 2,6,15 in which version a major vm write
+ * vm code for kernels below 2.6.15 in which version a major vm write
 * occurred. This implements a simple, straightforward
* version similar to what's going to be
- * in kernel 2.6.20+?
+ * in kernel 2.6.19+
+ * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
+ * nopfn.
*/
-static int drm_pte_is_clear(struct vm_area_struct *vma,
- unsigned long addr)
-{
- struct mm_struct *mm = vma->vm_mm;
- int ret = 1;
- pte_t *pte;
- pmd_t *pmd;
- pud_t *pud;
- pgd_t *pgd;
-
-
- spin_lock(&mm->page_table_lock);
- pgd = pgd_offset(mm, addr);
- if (pgd_none(*pgd))
- goto unlock;
- pud = pud_offset(pgd, addr);
- if (pud_none(*pud))
- goto unlock;
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd))
- goto unlock;
- pte = pte_offset_map(pmd, addr);
- if (!pte)
- goto unlock;
- ret = pte_none(*pte);
- pte_unmap(pte);
- unlock:
- spin_unlock(&mm->page_table_lock);
- return ret;
-}
-
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn, pgprot_t pgprot)
-{
- int ret;
- if (!drm_pte_is_clear(vma, addr))
- return -EBUSY;
-
- ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
- return ret;
-}
-
static struct {
spinlock_t lock;
struct page *dummy_page;
@@ -160,7 +120,7 @@ void free_nopage_retry(void)
}
}
-struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
@@ -171,7 +131,7 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
data.address = address;
data.vma = vma;
- drm_vm_ttm_fault(vma, &data);
+ drm_bo_vm_fault(vma, &data);
switch (data.type) {
case VM_FAULT_OOM:
return NOPAGE_OOM;
@@ -186,10 +146,85 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
#endif
+#if !defined(DRM_FULL_MM_COMPAT) && \
+ ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
+
+static int drm_pte_is_clear(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int ret = 1;
+ pte_t *pte;
+ pmd_t *pmd;
+ pud_t *pud;
+ pgd_t *pgd;
+
+ spin_lock(&mm->page_table_lock);
+ pgd = pgd_offset(mm, addr);
+ if (pgd_none(*pgd))
+ goto unlock;
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud))
+ goto unlock;
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ goto unlock;
+ pte = pte_offset_map(pmd, addr);
+ if (!pte)
+ goto unlock;
+ ret = pte_none(*pte);
+ pte_unmap(pte);
+ unlock:
+ spin_unlock(&mm->page_table_lock);
+ return ret;
+}
+
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn)
+{
+ int ret;
+ if (!drm_pte_is_clear(vma, addr))
+ return -EBUSY;
+
+ ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
+ return ret;
+}
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) && !defined(DRM_FULL_MM_COMPAT))
+
+/**
+ * While waiting for the fault() handler to appear in mainline kernels,
+ * we accomplish approximately the same thing by wrapping the fault
+ * handler with nopfn.
+ */
+
+unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
+ unsigned long address)
+{
+ struct fault_data data;
+ data.address = address;
+
+ (void) drm_bo_vm_fault(vma, &data);
+ if (data.type == VM_FAULT_OOM)
+ return NOPFN_OOM;
+ else if (data.type == VM_FAULT_SIGBUS)
+ return NOPFN_SIGBUS;
+
+ /*
+ * pfn already set.
+ */
+
+ return 0;
+}
+#endif
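
On 2.6.19 and later kernels without DRM_FULL_MM_COMPAT, the handler above is meant to be plugged into the nopfn member those kernels added to struct vm_operations_struct. A minimal wiring sketch; the open/close helpers are assumptions standing in for the real ones in drm_vm.c:

/* Illustrative wiring for 2.6.19 <= kernel < 2.6.21. */
static struct vm_operations_struct drm_bo_vm_ops = {
        .nopfn = drm_bo_vm_nopfn,
        .open = drm_bo_vm_open,         /* assumed helper */
        .close = drm_bo_vm_close,       /* assumed helper */
};
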
+
+
#ifdef DRM_ODD_MM_COMPAT
/*
- * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
+ * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
* workaround for a single BUG statement in do_no_page in these versions. The
* tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
* vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
@@ -212,109 +247,100 @@ typedef struct vma_entry {
} vma_entry_t;
-struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
- drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
- drm_buffer_manager_t *bm;
drm_device_t *dev;
- /*
- * FIXME: Check can't map aperture flag.
- */
+ mutex_lock(&bo->mutex);
if (type)
*type = VM_FAULT_MINOR;
- if (!map)
- return NOPAGE_OOM;
-
- if (address > vma->vm_end)
- return NOPAGE_SIGBUS;
+ if (address > vma->vm_end) {
+ page = NOPAGE_SIGBUS;
+ goto out_unlock;
+ }
+
+ dev = bo->dev;
- ttm = (drm_ttm_t *) map->offset;
- dev = ttm->dev;
- mutex_lock(&dev->struct_mutex);
- drm_fixup_ttm_caching(ttm);
- BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
+ if (drm_mem_reg_is_pci(dev, &bo->mem)) {
+ DRM_ERROR("Invalid compat nopage.\n");
+ page = NOPAGE_SIGBUS;
+ goto out_unlock;
+ }
- bm = &dev->bm;
+ ttm = bo->ttm;
+ drm_ttm_fixup_caching(ttm);
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
- page = ttm->pages[page_offset];
-
+ page = drm_ttm_get_page(ttm, page_offset);
if (!page) {
- if (drm_alloc_memctl(PAGE_SIZE)) {
- page = NOPAGE_OOM;
- goto out;
- }
- page = ttm->pages[page_offset] =
- alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
- if (!page) {
- drm_free_memctl(PAGE_SIZE);
- page = NOPAGE_OOM;
- goto out;
- }
- ++bm->cur_pages;
- SetPageLocked(page);
+ page = NOPAGE_OOM;
+ goto out_unlock;
}
get_page(page);
- out:
- mutex_unlock(&dev->struct_mutex);
+out_unlock:
+ mutex_unlock(&bo->mutex);
return page;
}
-int drm_ttm_map_bound(struct vm_area_struct *vma)
+int drm_bo_map_bound(struct vm_area_struct *vma)
{
- drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
- drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+ drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
int ret = 0;
-
- if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
- unsigned long pfn = ttm->aper_offset +
- (ttm->be->aperture_base >> PAGE_SHIFT);
- pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
-
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+
+ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+ &bus_offset, &bus_size);
+ BUG_ON(ret);
+
+ if (bus_size) {
+ drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
+ unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
+ pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start,
pgprot);
}
+
return ret;
}
-int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n_entry;
vma_entry_t *v_entry;
- drm_local_map_t *map = (drm_local_map_t *)
- vma->vm_private_data;
struct mm_struct *mm = vma->vm_mm;
- v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
+ v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
if (!v_entry) {
DRM_ERROR("Allocation of vma pointer entry failed\n");
return -ENOMEM;
}
v_entry->vma = vma;
- map->handle = (void *) v_entry;
- list_add_tail(&v_entry->head, &ttm->vma_list);
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ list_add_tail(&v_entry->head, &bo->vma_list);
+
+ list_for_each_entry(entry, &bo->p_mm_list, head) {
if (mm == entry->mm) {
atomic_inc(&entry->refcount);
return 0;
} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
}
- n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+ n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
if (!n_entry) {
DRM_ERROR("Allocation of process mm pointer entry failed\n");
return -ENOMEM;
@@ -328,29 +354,29 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
return 0;
}
-void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n;
vma_entry_t *v_entry, *v_n;
int found = 0;
struct mm_struct *mm = vma->vm_mm;
- list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
+ list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
if (v_entry->vma == vma) {
found = 1;
list_del(&v_entry->head);
- drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+ drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
break;
}
}
BUG_ON(!found);
- list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
+ list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
if (mm == entry->mm) {
if (atomic_add_negative(-1, &entry->refcount)) {
list_del(&entry->head);
BUG_ON(entry->locked);
- drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
+ drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
}
return;
}
@@ -360,12 +386,12 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
-int drm_ttm_lock_mm(drm_ttm_t * ttm)
+int drm_bo_lock_kmm(drm_buffer_object_t * bo)
{
p_mm_entry_t *entry;
int lock_ok = 1;
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(entry->locked);
if (!down_write_trylock(&entry->mm->mmap_sem)) {
lock_ok = 0;
@@ -377,7 +403,7 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
if (lock_ok)
return 0;
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ list_for_each_entry(entry, &bo->p_mm_list, head) {
if (!entry->locked)
break;
up_write(&entry->mm->mmap_sem);
@@ -392,43 +418,40 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
return -EAGAIN;
}
-void drm_ttm_unlock_mm(drm_ttm_t * ttm)
+void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
{
p_mm_entry_t *entry;
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(!entry->locked);
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
}
}
-int drm_ttm_remap_bound(drm_ttm_t *ttm)
+int drm_bo_remap_bound(drm_buffer_object_t *bo)
{
vma_entry_t *v_entry;
int ret = 0;
-
- list_for_each_entry(v_entry, &ttm->vma_list, head) {
- ret = drm_ttm_map_bound(v_entry->vma);
- if (ret)
- break;
+
+ if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
+ list_for_each_entry(v_entry, &bo->vma_list, head) {
+ ret = drm_bo_map_bound(v_entry->vma);
+ if (ret)
+ break;
+ }
}
- drm_ttm_unlock_mm(ttm);
return ret;
}
-void drm_ttm_finish_unmap(drm_ttm_t *ttm)
+void drm_bo_finish_unmap(drm_buffer_object_t *bo)
{
vma_entry_t *v_entry;
-
- if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
- return;
- list_for_each_entry(v_entry, &ttm->vma_list, head) {
+ list_for_each_entry(v_entry, &bo->vma_list, head) {
v_entry->vma->vm_flags &= ~VM_PFNMAP;
}
- drm_ttm_unlock_mm(ttm);
}
#endif
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 3cb5d202..7741714a 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -97,8 +97,6 @@
#define __GFP_COMP 0
#endif
-#define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
-
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
@@ -154,15 +152,25 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
(tmp);})
#endif
+#ifndef list_for_each_entry_safe_reverse
+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
+ for (pos = list_entry((head)->prev, typeof(*pos), member), \
+ n = list_entry(pos->member.prev, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.prev, typeof(*n), member))
+#endif
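
A short usage example of the fallback macro; drm_fence_handler() in this commit walks the fence ring exactly this way:

drm_fence_object_t *fence, *next;

/* Walk tail-to-head while entries may be unlinked along the way. */
list_for_each_entry_safe_reverse(fence, next, &fc->ring, ring) {
        /* 'next' already points at the element visited after 'fence',
         * so 'fence' may safely be deleted from the ring here. */
}
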
#include <linux/mm.h>
#include <asm/page.h>
-#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && \
- (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
+#define DRM_FULL_MM_COMPAT
+#endif
/*
@@ -200,18 +208,23 @@ extern int drm_map_page_into_agp(struct page *page);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);
-struct fault_data;
-extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
- struct fault_data *data);
#define NOPAGE_REFAULT get_nopage_retry()
#endif
+#if !defined(DRM_FULL_MM_COMPAT) && \
+ ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
+struct fault_data;
+extern struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+ struct fault_data *data);
+
+#endif
+#ifndef DRM_FULL_MM_COMPAT
/*
- * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
+ * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
* For now, just return a dummy page that we've allocated out of
* static space. The page will be put by do_nopage() since we've already
* filled out the pte.
@@ -228,17 +241,21 @@ struct fault_data {
extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn, pgprot_t pgprot);
-
-extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int *type);
+ unsigned long pfn);
-#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type);
+#else
+extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
+ unsigned long address);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
+#endif /* ndef DRM_FULL_MM_COMPAT */
#ifdef DRM_ODD_MM_COMPAT
-struct drm_ttm;
+struct drm_buffer_object;
/*
@@ -246,14 +263,14 @@ struct drm_ttm;
* process mm pointer to the ttm mm list. Needs the ttm mutex.
*/
-extern int drm_ttm_add_vma(struct drm_ttm * ttm,
+extern int drm_bo_add_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Delete a vma and the corresponding mm pointer from the
* ttm lists. Needs the ttm mutex.
*/
-extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
- struct vm_area_struct *vma);
+extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
+ struct vm_area_struct *vma);
/*
* Attempts to lock all relevant mmap_sems for a ttm, while
@@ -262,12 +279,12 @@ extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
* schedule() and try again.
*/
-extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
+extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
/*
* Unlock all relevant mmap_sems for a ttm.
*/
-extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
+extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
/*
* If the ttm was bound to the aperture, this function shall be called
@@ -277,7 +294,7 @@ extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
* releases the mmap_sems for this ttm.
*/
-extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
+extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
/*
* Remap all vmas of this ttm using io_remap_pfn_range. We cannot
@@ -286,14 +303,14 @@ extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
* releases the mmap_sems for this ttm.
*/
-extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
+extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
/*
* Remap a vma for a bound ttm. Call with the ttm mutex held and
* the relevant mmap_sem locked.
*/
-extern int drm_ttm_map_bound(struct vm_area_struct *vma);
+extern int drm_bo_map_bound(struct vm_area_struct *vma);
#endif
#endif
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 06d48255..6dd04a35 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -11,6 +11,10 @@
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@@ -19,11 +23,6 @@
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@@ -35,21 +34,42 @@
* Typically called by the IRQ handler.
*/
-void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
+void drm_fence_handler(drm_device_t * dev, uint32_t class,
+ uint32_t sequence, uint32_t type)
{
int wake = 0;
uint32_t diff;
uint32_t relevant;
drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_class_manager_t *fc = &fm->class[class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
- struct list_head *list, *prev;
- drm_fence_object_t *fence;
+ struct list_head *head;
+ drm_fence_object_t *fence, *next;
int found = 0;
+ int is_exe = (type & DRM_FENCE_TYPE_EXE);
+ int ge_last_exe;
+
+ diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
+
+ if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
+ fc->pending_exe_flush = 0;
- if (list_empty(&fm->ring))
+ diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
+ ge_last_exe = diff < driver->wrap_diff;
+
+ if (ge_last_exe)
+ fc->pending_flush &= ~type;
+
+ if (is_exe && ge_last_exe) {
+ fc->last_exe_flush = sequence;
+ }
+
+ if (list_empty(&fc->ring))
return;
- list_for_each_entry(fence, &fm->ring, ring) {
+ list_for_each_entry(fence, &fc->ring, ring) {
diff = (sequence - fence->sequence) & driver->sequence_mask;
if (diff > driver->wrap_diff) {
found = 1;
@@ -57,11 +77,11 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
}
}
- list = (found) ? fence->ring.prev : fm->ring.prev;
- prev = list->prev;
+ head = (found) ? &fence->ring : &fc->ring;
- for (; list != &fm->ring; list = prev, prev = list->prev) {
- fence = list_entry(list, drm_fence_object_t, ring);
+ list_for_each_entry_safe_reverse(fence, next, head, ring) {
+ if (&fence->ring == &fc->ring)
+ break;
type |= fence->native_type;
relevant = type & fence->type;
@@ -78,7 +98,7 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
~(fence->signaled | fence->submitted_flush);
if (relevant) {
- fm->pending_flush |= relevant;
+ fc->pending_flush |= relevant;
fence->submitted_flush = fence->flush_mask;
}
@@ -89,9 +109,9 @@ void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
}
}
-
+
if (wake) {
- DRM_WAKEUP(&fm->fence_queue);
+ DRM_WAKEUP(&fc->fence_queue);
}
}
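
With the new class parameter, a driver's IRQ path reports the hardware breadcrumb per fence class. An illustrative caller follows; the breadcrumb read is hypothetical, and the fence manager lock is taken here because drm_fence_handler() itself does no locking:

static void example_fence_irq(drm_device_t * dev)
{
        drm_fence_manager_t *fm = &dev->fm;
        unsigned long flags;
        uint32_t sequence;

        write_lock_irqsave(&fm->lock, flags);
        sequence = example_read_breadcrumb(dev);        /* hypothetical */
        drm_fence_handler(dev, 0 /* class */, sequence, DRM_FENCE_TYPE_EXE);
        write_unlock_irqrestore(&fm->lock, flags);
}
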
@@ -147,7 +167,7 @@ static void drm_fence_object_destroy(drm_file_t * priv,
drm_fence_usage_deref_locked(dev, fence);
}
-static int fence_signaled(drm_device_t * dev, volatile
+static int fence_signaled(drm_device_t * dev,
drm_fence_object_t * fence,
uint32_t mask, int poke_flush)
{
@@ -157,7 +177,7 @@ static int fence_signaled(drm_device_t * dev, volatile
drm_fence_driver_t *driver = dev->driver->fence_driver;
if (poke_flush)
- driver->poke_flush(dev);
+ driver->poke_flush(dev, fence->class);
read_lock_irqsave(&fm->lock, flags);
signaled =
(fence->type & mask & fence->signaled) == (fence->type & mask);
@@ -166,52 +186,35 @@ static int fence_signaled(drm_device_t * dev, volatile
return signaled;
}
-static void drm_fence_flush_exe(drm_fence_manager_t * fm,
+static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
drm_fence_driver_t * driver, uint32_t sequence)
{
uint32_t diff;
- if (!fm->pending_exe_flush) {
- volatile struct list_head *list;
-
- /*
- * Last_exe_flush is invalid. Find oldest sequence.
- */
-
-/* list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
- list = &fm->ring;
- if (list->next == &fm->ring) {
- return;
- } else {
- drm_fence_object_t *fence =
- list_entry(list->next, drm_fence_object_t, ring);
- fm->last_exe_flush = (fence->sequence - 1) &
- driver->sequence_mask;
- }
- diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
- if (diff >= driver->wrap_diff)
- return;
- fm->exe_flush_sequence = sequence;
- fm->pending_exe_flush = 1;
+ if (!fc->pending_exe_flush) {
+ fc->exe_flush_sequence = sequence;
+ fc->pending_exe_flush = 1;
} else {
diff =
- (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
+ (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
if (diff < driver->wrap_diff) {
- fm->exe_flush_sequence = sequence;
+ fc->exe_flush_sequence = sequence;
}
}
}
-int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
+int drm_fence_object_signaled(drm_fence_object_t * fence,
uint32_t type)
{
return ((fence->signaled & type) == type);
}
int drm_fence_object_flush(drm_device_t * dev,
- volatile drm_fence_object_t * fence, uint32_t type)
+ drm_fence_object_t * fence,
+ uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_class_manager_t *fc = &fm->class[fence->class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
unsigned long flags;
@@ -226,16 +229,16 @@ int drm_fence_object_flush(drm_device_t * dev,
if (fence->submitted_flush == fence->signaled) {
if ((fence->type & DRM_FENCE_TYPE_EXE) &&
!(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
- drm_fence_flush_exe(fm, driver, fence->sequence);
+ drm_fence_flush_exe(fc, driver, fence->sequence);
fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
} else {
- fm->pending_flush |= (fence->flush_mask &
+ fc->pending_flush |= (fence->flush_mask &
~fence->submitted_flush);
fence->submitted_flush = fence->flush_mask;
}
}
write_unlock_irqrestore(&fm->lock, flags);
- driver->poke_flush(dev);
+ driver->poke_flush(dev, fence->class);
return 0;
}
@@ -244,24 +247,35 @@ int drm_fence_object_flush(drm_device_t * dev,
* wrapped around and reused.
*/
-void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
+void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
{
drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_class_manager_t *fc = &fm->class[class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
uint32_t old_sequence;
unsigned long flags;
drm_fence_object_t *fence;
uint32_t diff;
+ write_lock_irqsave(&fm->lock, flags);
+ old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
+ diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
+
+ if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
+ fc->pending_exe_flush = 1;
+ fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
+ }
+ write_unlock_irqrestore(&fm->lock, flags);
+
mutex_lock(&dev->struct_mutex);
read_lock_irqsave(&fm->lock, flags);
- if (fm->ring.next == &fm->ring) {
+
+ if (list_empty(&fc->ring)) {
read_unlock_irqrestore(&fm->lock, flags);
mutex_unlock(&dev->struct_mutex);
return;
}
- old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
- fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
+ fence = list_entry(fc->ring.next, drm_fence_object_t, ring);
atomic_inc(&fence->usage);
mutex_unlock(&dev->struct_mutex);
diff = (old_sequence - fence->sequence) & driver->sequence_mask;
@@ -274,11 +288,40 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
EXPORT_SYMBOL(drm_fence_flush_old);
+static int drm_fence_lazy_wait(drm_device_t *dev,
+ drm_fence_object_t *fence,
+ int ignore_signals,
+ uint32_t mask)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_class_manager_t *fc = &fm->class[fence->class];
+
+ unsigned long _end = jiffies + 3*DRM_HZ;
+ int ret = 0;
+
+ do {
+ DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
+ fence_signaled(dev, fence, mask, 0));
+ if (time_after_eq(jiffies, _end))
+ break;
+ } while (ret == -EINTR && ignore_signals);
+ if (time_after_eq(jiffies, _end) && (ret != 0))
+ ret = -EBUSY;
+ if (ret) {
+ if (ret == -EBUSY) {
+ DRM_ERROR("Fence timeout. "
+ "GPU lockup or fence driver was "
+ "taken down.\n");
+ }
+ return ((ret == -EINTR) ? -EAGAIN : ret);
+ }
+ return 0;
+}
+
int drm_fence_object_wait(drm_device_t * dev,
- volatile drm_fence_object_t * fence,
+ drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask)
{
- drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
int ret = 0;
unsigned long _end;
@@ -299,44 +342,29 @@ int drm_fence_object_wait(drm_device_t * dev,
if (lazy && driver->lazy_capable) {
- do {
- DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
- fence_signaled(dev, fence, mask, 1));
- if (time_after_eq(jiffies, _end))
- break;
- } while (ret == -EINTR && ignore_signals);
- if (time_after_eq(jiffies, _end) && (ret != 0))
- ret = -EBUSY;
- if (ret) {
- if (ret == -EBUSY) {
- DRM_ERROR("Fence timeout. "
- "GPU lockup or fence driver was "
- "taken down.\n");
- }
- return ((ret == -EINTR) ? -EAGAIN : ret);
- }
- } else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
- driver->lazy_capable) {
+ ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
+ if (ret)
+ return ret;
- /*
- * We use IRQ wait for EXE fence if available to gain
- * CPU in some cases.
- */
+ } else {
- do {
- DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
- fence_signaled(dev, fence,
- DRM_FENCE_TYPE_EXE, 1));
- if (time_after_eq(jiffies, _end))
- break;
- } while (ret == -EINTR && ignore_signals);
- if (time_after_eq(jiffies, _end) && (ret != 0))
- ret = -EBUSY;
- if (ret)
- return ((ret == -EINTR) ? -EAGAIN : ret);
- }
+ if (driver->has_irq(dev, fence->class,
+ DRM_FENCE_TYPE_EXE)) {
+ ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
+ DRM_FENCE_TYPE_EXE);
+ if (ret)
+ return ret;
+ }
- if (fence_signaled(dev, fence, mask, 0))
+ if (driver->has_irq(dev, fence->class,
+ mask & ~DRM_FENCE_TYPE_EXE)) {
+ ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
+ mask);
+ if (ret)
+ return ret;
+ }
+ }
+ if (drm_fence_object_signaled(fence, mask))
return 0;
/*
@@ -358,33 +386,38 @@ int drm_fence_object_wait(drm_device_t * dev,
}
int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
- uint32_t fence_flags, uint32_t type)
+ uint32_t fence_flags, uint32_t class, uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
+ drm_fence_class_manager_t *fc = &fm->class[fence->class];
unsigned long flags;
uint32_t sequence;
uint32_t native_type;
int ret;
drm_fence_unring(dev, &fence->ring);
- ret = driver->emit(dev, fence_flags, &sequence, &native_type);
+ ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
if (ret)
return ret;
write_lock_irqsave(&fm->lock, flags);
+ fence->class = class;
fence->type = type;
fence->flush_mask = 0x00;
fence->submitted_flush = 0x00;
fence->signaled = 0x00;
fence->sequence = sequence;
fence->native_type = native_type;
- list_add_tail(&fence->ring, &fm->ring);
+ if (list_empty(&fc->ring))
+ fc->last_exe_flush = sequence - 1;
+ list_add_tail(&fence->ring, &fc->ring);
write_unlock_irqrestore(&fm->lock, flags);
return 0;
}
-static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
+static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
+ uint32_t type,
uint32_t fence_flags,
drm_fence_object_t * fence)
{
@@ -398,7 +431,7 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);
- fence->class = 0;
+ fence->class = class;
fence->type = type;
fence->flush_mask = 0;
fence->submitted_flush = 0;
@@ -406,7 +439,8 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
fence->sequence = 0;
write_unlock_irqrestore(&fm->lock, flags);
if (fence_flags & DRM_FENCE_FLAG_EMIT) {
- ret = drm_fence_object_emit(dev, fence, fence_flags, type);
+ ret = drm_fence_object_emit(dev, fence, fence_flags,
+ fence->class, type);
}
return ret;
}
@@ -430,7 +464,7 @@ int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
EXPORT_SYMBOL(drm_fence_add_user_object);
-int drm_fence_object_create(drm_device_t * dev, uint32_t type,
+int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
unsigned flags, drm_fence_object_t ** c_fence)
{
drm_fence_object_t *fence;
@@ -440,7 +474,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type,
fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
if (!fence)
return -ENOMEM;
- ret = drm_fence_object_init(dev, type, flags, fence);
+ ret = drm_fence_object_init(dev, class, type, flags, fence);
if (ret) {
drm_fence_usage_deref_unlocked(dev, fence);
return ret;
@@ -456,22 +490,31 @@ EXPORT_SYMBOL(drm_fence_object_create);
void drm_fence_manager_init(drm_device_t * dev)
{
drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_class_manager_t *class;
drm_fence_driver_t *fed = dev->driver->fence_driver;
int i;
+
fm->lock = RW_LOCK_UNLOCKED;
write_lock(&fm->lock);
- INIT_LIST_HEAD(&fm->ring);
- fm->pending_flush = 0;
- DRM_INIT_WAITQUEUE(&fm->fence_queue);
fm->initialized = 0;
- if (fed) {
- fm->initialized = 1;
- atomic_set(&fm->count, 0);
- for (i = 0; i < fed->no_types; ++i) {
- fm->fence_types[i] = &fm->ring;
- }
+ if (!fed)
+ goto out_unlock;
+
+ fm->initialized = 1;
+ fm->num_classes = fed->num_classes;
+ BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
+
+ for (i = 0; i < fm->num_classes; ++i) {
+ class = &fm->class[i];
+
+ INIT_LIST_HEAD(&class->ring);
+ class->pending_flush = 0;
+ DRM_INIT_WAITQUEUE(&class->fence_queue);
}
+
+ atomic_set(&fm->count, 0);
+ out_unlock:
write_unlock(&fm->lock);
}
@@ -518,7 +561,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
case drm_fence_create:
if (arg.flags & DRM_FENCE_FLAG_EMIT)
LOCK_TEST_WITH_RETURN(dev, filp);
- ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
+ ret = drm_fence_object_create(dev, arg.class,
+ arg.type, arg.flags, &fence);
if (ret)
return ret;
ret = drm_fence_add_user_object(priv, fence,
@@ -581,7 +625,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
- ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
+ ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class,
+ arg.type);
break;
case drm_fence_buffers:
if (!dev->bm.initialized) {
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 84e06c87..faf76726 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -427,38 +427,51 @@ int drm_release(struct inode *inode, struct file *filp)
dev->open_count);
if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
- unsigned long _end = jiffies + DRM_HZ*3;
-
- do {
- retcode = drm_kernel_take_hw_lock(filp);
- } while(retcode && !time_after_eq(jiffies,_end));
-
- if (!retcode) {
+ if (drm_i_have_hw_lock(filp)) {
dev->driver->reclaim_buffers_locked(dev, filp);
-
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
} else {
+ unsigned long _end = jiffies + 3 * DRM_HZ;
+ int locked = 0;
+
+ drm_idlelock_take(&dev->lock);
/*
- * FIXME: This is not a good solution. We should perhaps associate the
- * DRM lock with a process context, and check whether the current process
- * holds the lock. Then we can run reclaim buffers locked anyway.
+ * Wait for a while.
*/
- DRM_ERROR("Reclaim buffers locked deadlock.\n"
- "\tThis is probably a single thread having multiple\n"
- "\tDRM file descriptors open either dying or"
- " closing file descriptors\n"
- "\twhile having the lock. I will not reclaim buffers.\n"
- "\tLocking context is 0x%08x\n",
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ do {
+ spin_lock(&dev->lock.spinlock);
+ locked = dev->lock.idle_has_lock;
+ spin_unlock(&dev->lock.spinlock);
+ if (locked)
+ break;
+ schedule();
+ } while (!time_after_eq(jiffies, _end));
+
+ if (!locked) {
+ DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+ "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+ "\tI will go on reclaiming the buffers anyway.\n");
+ }
+
+ dev->driver->reclaim_buffers_locked(dev, filp);
+ drm_idlelock_release(&dev->lock);
}
- } else if (drm_i_have_hw_lock(filp)) {
+ }
+
+ if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
+
+ drm_idlelock_take(&dev->lock);
+ dev->driver->reclaim_buffers_idlelocked(dev, filp);
+ drm_idlelock_release(&dev->lock);
+
+ }
+
+ if (drm_i_have_hw_lock(filp)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ drm_lock_free(&dev->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
}
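
The idlelock introduced here lets buffers be reclaimed without ever granting the heavyweight lock to another client. The caller-side pattern, condensed from the code above:

drm_idlelock_take(&dev->lock);
dev->driver->reclaim_buffers_idlelocked(dev, filp);
drm_idlelock_release(&dev->lock);
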
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index c365c08e..92cf7f5c 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -422,7 +422,7 @@ static void drm_locked_tasklet_func(unsigned long data)
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
if (!dev->locked_tasklet_func ||
- !drm_lock_take(&dev->lock.hw_lock->lock,
+ !drm_lock_take(&dev->lock,
DRM_KERNEL_CONTEXT)) {
spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
return;
@@ -433,7 +433,7 @@ static void drm_locked_tasklet_func(unsigned long data)
dev->locked_tasklet_func(dev);
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ drm_lock_free(&dev->lock,
DRM_KERNEL_CONTEXT);
dev->locked_tasklet_func = NULL;
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index d11c570e..f02df36b 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -35,12 +35,6 @@
#include "drmP.h"
-#if 0
-static int drm_lock_transfer(drm_device_t * dev,
- __volatile__ unsigned int *lock,
- unsigned int context);
-#endif
-
static int drm_notifier(void *priv);
/**
@@ -83,6 +77,9 @@ int drm_lock(struct inode *inode, struct file *filp,
return -EINVAL;
add_wait_queue(&dev->lock.lock_queue, &entry);
+ spin_lock(&dev->lock.spinlock);
+ dev->lock.user_waiters++;
+ spin_unlock(&dev->lock.spinlock);
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!dev->lock.hw_lock) {
@@ -90,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp,
ret = -EINTR;
break;
}
- if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
+ if (drm_lock_take(&dev->lock, lock.context)) {
dev->lock.filp = filp;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
@@ -104,6 +101,9 @@ int drm_lock(struct inode *inode, struct file *filp,
break;
}
}
+ spin_lock(&dev->lock.spinlock);
+ dev->lock.user_waiters--;
+ spin_unlock(&dev->lock.spinlock);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
@@ -184,8 +184,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
if (dev->driver->kernel_context_switch_unlock)
dev->driver->kernel_context_switch_unlock(dev);
else {
- if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
- lock.context)) {
+ if (drm_lock_free(&dev->lock, lock.context)) {
/* FIXME: Should really bail out here. */
}
}
@@ -203,18 +202,26 @@ int drm_unlock(struct inode *inode, struct file *filp,
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
-int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_take(drm_lock_data_t *lock_data,
+ unsigned int context)
{
unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+ spin_lock(&lock_data->spinlock);
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
- else
- new = context | _DRM_LOCK_HELD | _DRM_LOCK_CONT;
+ else {
+ new = context | _DRM_LOCK_HELD |
+ ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+ _DRM_LOCK_CONT : 0);
+ }
prev = cmpxchg(lock, old, new);
} while (prev != old);
+ spin_unlock(&lock_data->spinlock);
+
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
if (context != DRM_KERNEL_CONTEXT) {
@@ -224,14 +231,15 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
return 0;
}
}
- if (new == (context | _DRM_LOCK_HELD | _DRM_LOCK_CONT)) {
+
+ if (_DRM_LOCKING_CONTEXT(new) == context && (new & _DRM_LOCK_HELD)) {
/* Have lock */
+
return 1;
}
return 0;
}
-#if 0
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
@@ -244,13 +252,13 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
-static int drm_lock_transfer(drm_device_t * dev,
- __volatile__ unsigned int *lock,
+static int drm_lock_transfer(drm_lock_data_t *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
- dev->lock.filp = NULL;
+ lock_data->filp = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
@@ -258,7 +266,6 @@ static int drm_lock_transfer(drm_device_t * dev,
} while (prev != old);
return 1;
}
-#endif
/**
* Free lock.
@@ -271,10 +278,19 @@ static int drm_lock_transfer(drm_device_t * dev,
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
-int drm_lock_free(drm_device_t * dev,
- __volatile__ unsigned int *lock, unsigned int context)
+int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
{
unsigned int old, new, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ spin_lock(&lock_data->spinlock);
+ if (lock_data->kernel_waiters != 0) {
+ drm_lock_transfer(lock_data, 0);
+ lock_data->idle_has_lock = 1;
+ spin_unlock(&lock_data->spinlock);
+ return 1;
+ }
+ spin_unlock(&lock_data->spinlock);
do {
old = *lock;
@@ -287,7 +303,7 @@ int drm_lock_free(drm_device_t * dev,
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
- wake_up_interruptible(&dev->lock.lock_queue);
+ wake_up_interruptible(&lock_data->lock_queue);
return 0;
}
@@ -322,65 +338,66 @@ static int drm_notifier(void *priv)
return 0;
}
-/*
- * Can be used by drivers to take the hardware lock if necessary.
- * (Waiting for idle before reclaiming buffers etc.)
+/**
+ * This function returns immediately and takes the hw lock
+ * with the kernel context if it is free; otherwise it registers the
+ * kernel as the highest-priority waiter, so the lock is handed over
+ * when and if it is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ have the lock _unless_
+ * it is held by a blocked process. (In the latter case an explicit wait for
+ * the hardware lock would deadlock, which is why the "idlelock" was
+ * invented.)
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
-int drm_i_have_hw_lock(struct file *filp)
+void drm_idlelock_take(drm_lock_data_t *lock_data)
{
- DRM_DEVICE;
-
- return (priv->lock_count && dev->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.filp == filp);
-}
+ int ret = 0;
-EXPORT_SYMBOL(drm_i_have_hw_lock);
+ spin_lock(&lock_data->spinlock);
+ lock_data->kernel_waiters++;
+ if (!lock_data->idle_has_lock) {
-int drm_kernel_take_hw_lock(struct file *filp)
-{
- DRM_DEVICE;
+ spin_unlock(&lock_data->spinlock);
+ ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+ spin_lock(&lock_data->spinlock);
- int ret = 0;
- unsigned long _end = jiffies + 3*DRM_HZ;
-
- if (!drm_i_have_hw_lock(filp)) {
-
- DECLARE_WAITQUEUE(entry, current);
-
- add_wait_queue(&dev->lock.lock_queue, &entry);
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!dev->lock.hw_lock) {
- /* Device has been unregistered */
- ret = -EINTR;
- break;
- }
- if (drm_lock_take(&dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- dev->lock.filp = filp;
- dev->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
- break; /* Got lock */
- }
- /* Contention */
- if (time_after_eq(jiffies,_end)) {
- ret = -EBUSY;
- break;
- }
+ if (ret == 1)
+ lock_data->idle_has_lock = 1;
+ }
+ spin_unlock(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
- schedule_timeout(1);
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
+void drm_idlelock_release(drm_lock_data_t *lock_data)
+{
+ unsigned int old, prev;
+ volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+ spin_lock(&lock_data->spinlock);
+ if (--lock_data->kernel_waiters == 0) {
+ if (lock_data->idle_has_lock) {
+ do {
+ old = *lock;
+ prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+ } while (prev != old);
+ wake_up_interruptible(&lock_data->lock_queue);
+ lock_data->idle_has_lock = 0;
}
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev->lock.lock_queue, &entry);
}
- return ret;
+ spin_unlock(&lock_data->spinlock);
}
+EXPORT_SYMBOL(drm_idlelock_release);
-EXPORT_SYMBOL(drm_kernel_take_hw_lock);
+int drm_i_have_hw_lock(struct file *filp)
+{
+ DRM_DEVICE;
+
+ return (priv->lock_count && dev->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+ dev->lock.filp == filp);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
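
Taken together, drm_idlelock_take()/drm_idlelock_release() give in-kernel
callers a starvation-free way to quiesce the device. A minimal usage
sketch, assuming a hypothetical foo_engine_idle() hardware wait:

    void foo_quiesce(drm_device_t *dev)
    {
            drm_idlelock_take(&dev->lock);     /* lock comes to the kernel
                                                * now or at next release */
            foo_engine_idle(dev);              /* hypothetical GPU wait */
            drm_idlelock_release(&dev->lock);  /* hand it back, wake waiters */
    }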
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index 5889ee4d..634a1782 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -217,6 +217,7 @@ void drm_mm_put_block(drm_mm_node_t * cur)
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
+EXPORT_SYMBOL(drm_mm_put_block);
drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
unsigned long size,
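
drm_mm_put_block() is exported because drivers now hand aperture ranges
back themselves (see i915_move_flip() further down). The usual
allocate/free pattern against a drm_mm_t, sketched here under
dev->struct_mutex:

    drm_mm_node_t *node;

    node = drm_mm_search_free(&man->manager, num_pages, 0, 0);
    if (node)
            node = drm_mm_get_block(node, num_pages, 0);
    if (!node)
            return -ENOMEM;
    /* ... node->start is a page offset inside the managed range ... */
    drm_mm_put_block(node);   /* return the range to the manager */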
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
index 0157329c..939cf0d7 100644
--- a/linux-core/drm_object.c
+++ b/linux-core/drm_object.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -10,6 +10,10 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
@@ -18,13 +22,11 @@
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
*
**************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
#include "drmP.h"
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
new file mode 100644
index 00000000..98228ada
--- /dev/null
+++ b/linux-core/drm_objects.h
@@ -0,0 +1,470 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_OBJECTS_H
+#define _DRM_OBJECTS_H
+#define DRM_HAS_TTM
+
+struct drm_device;
+
+/***************************************************
+ * User space objects. (drm_object.c)
+ */
+
+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
+
+typedef enum {
+ drm_fence_type,
+ drm_buffer_type,
+ drm_ttm_type
+ /*
+ * Add other user space object types here.
+ */
+} drm_object_type_t;
+
+/*
+ * A user object is a structure that helps the drm give out user handles
+ * to kernel internal objects and to keep track of these objects so that
+ * they can be destroyed, for example when the user space process exits.
+ * Designed to be accessible using a user space 32-bit handle.
+ */
+
+typedef struct drm_user_object {
+ drm_hash_item_t hash;
+ struct list_head list;
+ drm_object_type_t type;
+ atomic_t refcount;
+ int shareable;
+ drm_file_t *owner;
+ void (*ref_struct_locked) (drm_file_t * priv,
+ struct drm_user_object * obj,
+ drm_ref_t ref_action);
+ void (*unref) (drm_file_t * priv, struct drm_user_object * obj,
+ drm_ref_t unref_action);
+ void (*remove) (drm_file_t * priv, struct drm_user_object * obj);
+} drm_user_object_t;
+
+/*
+ * A ref object is a structure used to keep track of references to user
+ * objects, so that the references can be destroyed, for example when the
+ * user space process exits. Designed to be accessible using a pointer
+ * to the _user_ object.
+ */
+
+typedef struct drm_ref_object {
+ drm_hash_item_t hash;
+ struct list_head list;
+ atomic_t refcount;
+ drm_ref_t unref_action;
+} drm_ref_object_t;
+
+/**
+ * Must be called with the struct_mutex held.
+ */
+
+extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
+ int shareable);
+/**
+ * Must be called with the struct_mutex held.
+ */
+
+extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv,
+ uint32_t key);
+
+/*
+ * Must be called with the struct_mutex held. If "item" has been obtained
+ * by a call to drm_lookup_user_object, you may not release the
+ * struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item);
+
+/*
+ * Must be called with the struct_mutex held. May temporarily release it.
+ */
+
+extern int drm_add_ref_object(drm_file_t * priv,
+ drm_user_object_t * referenced_object,
+ drm_ref_t ref_action);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
+ drm_user_object_t * referenced_object,
+ drm_ref_t ref_action);
+/*
+ * Must be called with the struct_mutex held. If "item" has been obtained
+ * by a call to drm_lookup_ref_object, you may not release the
+ * struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item);
+extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type,
+ drm_user_object_t ** object);
+extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type);
+
+/***************************************************
+ * Fence objects. (drm_fence.c)
+ */
+
+typedef struct drm_fence_object {
+ drm_user_object_t base;
+ atomic_t usage;
+
+ /*
+ * The below three fields are protected by the fence manager spinlock.
+ */
+
+ struct list_head ring;
+ int class;
+ uint32_t native_type;
+ uint32_t type;
+ uint32_t signaled;
+ uint32_t sequence;
+ uint32_t flush_mask;
+ uint32_t submitted_flush;
+} drm_fence_object_t;
+
+#define _DRM_FENCE_CLASSES 8
+#define _DRM_FENCE_TYPE_EXE 0x00
+
+typedef struct drm_fence_class_manager {
+ struct list_head ring;
+ uint32_t pending_flush;
+ wait_queue_head_t fence_queue;
+ int pending_exe_flush;
+ uint32_t last_exe_flush;
+ uint32_t exe_flush_sequence;
+} drm_fence_class_manager_t;
+
+typedef struct drm_fence_manager {
+ int initialized;
+ rwlock_t lock;
+ drm_fence_class_manager_t class[_DRM_FENCE_CLASSES];
+ uint32_t num_classes;
+ atomic_t count;
+} drm_fence_manager_t;
+
+typedef struct drm_fence_driver {
+ uint32_t num_classes;
+ uint32_t wrap_diff;
+ uint32_t flush_diff;
+ uint32_t sequence_mask;
+ int lazy_capable;
+ int (*has_irq) (struct drm_device * dev, uint32_t class,
+ uint32_t flags);
+ int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags,
+ uint32_t * breadcrumb, uint32_t * native_type);
+ void (*poke_flush) (struct drm_device * dev, uint32_t class);
+} drm_fence_driver_t;
+
+extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
+ uint32_t sequence, uint32_t type);
+extern void drm_fence_manager_init(struct drm_device *dev);
+extern void drm_fence_manager_takedown(struct drm_device *dev);
+extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
+ uint32_t sequence);
+extern int drm_fence_object_flush(struct drm_device *dev,
+ drm_fence_object_t * fence, uint32_t type);
+extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type);
+extern void drm_fence_usage_deref_locked(struct drm_device *dev,
+ drm_fence_object_t * fence);
+extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
+ drm_fence_object_t * fence);
+extern int drm_fence_object_wait(struct drm_device *dev,
+ drm_fence_object_t * fence,
+ int lazy, int ignore_signals, uint32_t mask);
+extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
+ uint32_t fence_flags, uint32_t class,
+ drm_fence_object_t ** c_fence);
+extern int drm_fence_add_user_object(drm_file_t * priv,
+ drm_fence_object_t * fence, int shareable);
+extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
+
+/**************************************************
+ *TTMs
+ */
+
+/*
+ * The ttm backend GTT interface. (In our case AGP.)
+ * Any similar type of device (PCIE?) needs only to implement these
+ * functions to be usable with the "TTM" interface.
+ * The AGP backend implementation lives in drm_agpsupport.c and basically
+ * maps these calls to available functions in agpgart.
+ * Each drm device driver gets an additional function pointer that creates
+ * these types, so that the device can choose the correct aperture.
+ * (Multiple AGP apertures, etc.)
+ * Most device drivers will let this point to the standard AGP
+ * implementation.
+ */
+
+#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
+#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
+
+typedef struct drm_ttm_backend {
+ void *private;
+ uint32_t flags;
+ uint32_t drm_map_type;
+ int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
+ int (*populate) (struct drm_ttm_backend * backend,
+ unsigned long num_pages, struct page ** pages);
+ void (*clear) (struct drm_ttm_backend * backend);
+ int (*bind) (struct drm_ttm_backend * backend,
+ unsigned long offset, int cached);
+ int (*unbind) (struct drm_ttm_backend * backend);
+ void (*destroy) (struct drm_ttm_backend * backend);
+} drm_ttm_backend_t;
+
+typedef struct drm_ttm {
+ struct page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ unsigned long aper_offset;
+ atomic_t vma_count;
+ struct drm_device *dev;
+ int destroy;
+ uint32_t mapping_offset;
+ drm_ttm_backend_t *be;
+ enum {
+ ttm_bound,
+ ttm_evicted,
+ ttm_unbound,
+ ttm_unpopulated,
+ } state;
+
+} drm_ttm_t;
+
+extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
+extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
+extern void drm_ttm_unbind(drm_ttm_t * ttm);
+extern void drm_ttm_evict(drm_ttm_t * ttm);
+extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
+extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
+
+/*
+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
+ * this, which calls this function iff there are no vmas referencing the ttm
+ * anymore; otherwise destruction is deferred until the last vma exits.
+ */
+
+extern int drm_destroy_ttm(drm_ttm_t * ttm);
+
+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
+(_old) ^= (((_old) ^ (_new)) & (_mask)); \
+}
+
+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
+
+/*
+ * Page flags.
+ */
+
+#define DRM_TTM_PAGE_UNCACHED 0x01
+#define DRM_TTM_PAGE_USED 0x02
+#define DRM_TTM_PAGE_BOUND 0x04
+#define DRM_TTM_PAGE_PRESENT 0x08
+#define DRM_TTM_PAGE_VMALLOC 0x10
+
+/***************************************************
+ * Buffer objects. (drm_bo.c, drm_bo_move.c)
+ */
+
+typedef struct drm_bo_mem_reg {
+ drm_mm_node_t *mm_node;
+ unsigned long size;
+ unsigned long num_pages;
+ uint32_t page_alignment;
+ uint32_t mem_type;
+ uint32_t flags;
+ uint32_t mask;
+} drm_bo_mem_reg_t;
+
+typedef struct drm_buffer_object {
+ struct drm_device *dev;
+ drm_user_object_t base;
+
+ /*
+ * If there is a possibility that the usage variable is zero,
+ * then dev->struct_mutex should be locked before incrementing it.
+ */
+
+ atomic_t usage;
+ unsigned long buffer_start;
+ drm_bo_type_t type;
+ unsigned long offset;
+ atomic_t mapped;
+ drm_bo_mem_reg_t mem;
+
+ struct list_head lru;
+ struct list_head ddestroy;
+
+ uint32_t fence_type;
+ uint32_t fence_class;
+ drm_fence_object_t *fence;
+ uint32_t priv_flags;
+ wait_queue_head_t event_queue;
+ struct mutex mutex;
+
+ /* For pinned buffers */
+ drm_mm_node_t *pinned_node;
+ uint32_t pinned_mem_type;
+ struct list_head pinned_lru;
+
+ /* For vm */
+
+ drm_ttm_t *ttm;
+ drm_map_list_t map_list;
+ uint32_t memory_type;
+ unsigned long bus_offset;
+ uint32_t vm_flags;
+ void *iomap;
+
+#ifdef DRM_ODD_MM_COMPAT
+ /* dev->struct_mutex only protected. */
+ struct list_head vma_list;
+ struct list_head p_mm_list;
+#endif
+
+} drm_buffer_object_t;
+
+#define _DRM_BO_FLAG_UNFENCED 0x00000001
+#define _DRM_BO_FLAG_EVICTED 0x00000002
+
+typedef struct drm_mem_type_manager {
+ int has_type;
+ int use_type;
+ drm_mm_t manager;
+ struct list_head lru;
+ struct list_head pinned;
+ uint32_t flags;
+ uint32_t drm_bus_maptype;
+ unsigned long io_offset;
+ unsigned long io_size;
+ void *io_addr;
+} drm_mem_type_manager_t;
+
+#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
+#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
+#define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */
+#define _DRM_FLAG_NEEDS_IOREMAP 0x00000008 /* Fixed memory needs ioremap
+ before kernel access. */
+#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
+#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
+
+typedef struct drm_buffer_manager {
+ struct mutex init_mutex;
+ struct mutex evict_mutex;
+ int nice_mode;
+ int initialized;
+ drm_file_t *last_to_validate;
+ drm_mem_type_manager_t man[DRM_BO_MEM_TYPES];
+ struct list_head unfenced;
+ struct list_head ddestroy;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+ struct work_struct wq;
+#else
+ struct delayed_work wq;
+#endif
+ uint32_t fence_type;
+ unsigned long cur_pages;
+ atomic_t count;
+} drm_buffer_manager_t;
+
+typedef struct drm_bo_driver {
+ const uint32_t *mem_type_prio;
+ const uint32_t *mem_busy_prio;
+ uint32_t num_mem_type_prio;
+ uint32_t num_mem_busy_prio;
+ drm_ttm_backend_t *(*create_ttm_backend_entry)
+ (struct drm_device * dev);
+ int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type);
+ int (*invalidate_caches) (struct drm_device * dev, uint32_t flags);
+ int (*init_mem_type) (struct drm_device * dev, uint32_t type,
+ drm_mem_type_manager_t * man);
+ uint32_t(*evict_mask) (struct drm_buffer_object *bo);
+ int (*move) (struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
+} drm_bo_driver_t;
+
+/*
+ * buffer objects (drm_bo.c)
+ */
+
+extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_driver_finish(struct drm_device *dev);
+extern int drm_bo_driver_init(struct drm_device *dev);
+extern int drm_bo_pci_offset(struct drm_device *dev,
+ drm_bo_mem_reg_t * mem,
+ unsigned long *bus_base,
+ unsigned long *bus_offset,
+ unsigned long *bus_size);
+extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);
+
+extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
+extern int drm_fence_buffer_objects(drm_file_t * priv,
+ struct list_head *list,
+ uint32_t fence_flags,
+ drm_fence_object_t * fence,
+ drm_fence_object_t ** used_fence);
+extern void drm_bo_add_to_lru(drm_buffer_object_t * bo);
+extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
+ int no_wait);
+extern int drm_bo_mem_space(drm_buffer_object_t * bo,
+ drm_bo_mem_reg_t * mem, int no_wait);
+extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
+ int no_wait, int move_unfenced);
+
+/*
+ * Buffer object memory move helpers.
+ * drm_bo_move.c
+ */
+
+extern int drm_bo_move_ttm(drm_buffer_object_t * bo,
+ int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
+extern int drm_bo_move_memcpy(drm_buffer_object_t * bo,
+ int evict,
+ int no_wait, drm_bo_mem_reg_t * new_mem);
+extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
+ int evict,
+ int no_wait,
+ uint32_t fence_class,
+ uint32_t fence_type,
+ uint32_t fence_flags,
+ drm_bo_mem_reg_t * new_mem);
+
+#endif
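
The drm_ttm_backend contract above is all a GART-like device has to
implement. A skeletal example, with every foo_* symbol hypothetical:

    static int foo_bind(drm_ttm_backend_t *be, unsigned long offset,
                        int cached)
    {
            foo_gart_t *gart = (foo_gart_t *) be->private;  /* hypothetical */
            return foo_gart_map(gart, offset, cached);      /* hypothetical */
    }

    static int foo_unbind(drm_ttm_backend_t *be)
    {
            return foo_gart_unmap((foo_gart_t *) be->private);
    }

    static drm_ttm_backend_t foo_backend = {
            .flags = DRM_BE_FLAG_NEEDS_FREE,
            .drm_map_type = _DRM_AGP,
            .bind = foo_bind,
            .unbind = foo_unbind,
            /* .needs_ub_cache_adjust, .populate, .clear and .destroy
             * would be wired up the same way. */
    };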
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 60123cdc..348cd2c6 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -63,10 +63,12 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
+ spin_lock_init(&dev->lock.spinlock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->bm.init_mutex);
+ mutex_init(&dev->bm.evict_mutex);
dev->pdev = pdev;
dev->pci_device = pdev->device;
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index c17c41cb..e67719e2 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -11,6 +11,10 @@
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
@@ -18,13 +22,11 @@
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
*
**************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
#include "drmP.h"
@@ -33,18 +35,17 @@ static void drm_ttm_ipi_handler(void *null)
flush_agp_cache();
}
-static void drm_ttm_cache_flush(void)
+static void drm_ttm_cache_flush(void)
{
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
-
/*
* Use kmalloc if possible. Otherwise fall back to vmalloc.
*/
-static void ttm_alloc_pages(drm_ttm_t *ttm)
+static void ttm_alloc_pages(drm_ttm_t * ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
ttm->pages = NULL;
@@ -65,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t *ttm)
}
}
-static void ttm_free_pages(drm_ttm_t *ttm)
+static void ttm_free_pages(drm_ttm_t * ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
@@ -79,27 +80,24 @@ static void ttm_free_pages(drm_ttm_t *ttm)
ttm->pages = NULL;
}
-/*
- * Unmap all vma pages from vmas mapping this ttm.
- */
-
-static int unmap_vma_pages(drm_ttm_t * ttm)
+static struct page *drm_ttm_alloc_page(void)
{
- drm_device_t *dev = ttm->dev;
- loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
- loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
+ struct page *page;
-#ifdef DRM_ODD_MM_COMPAT
- int ret;
- ret = drm_ttm_lock_mm(ttm);
- if (ret)
- return ret;
-#endif
- unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
-#ifdef DRM_ODD_MM_COMPAT
- drm_ttm_finish_unmap(ttm);
+ if (drm_alloc_memctl(PAGE_SIZE)) {
+ return NULL;
+ }
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ if (!page) {
+ drm_free_memctl(PAGE_SIZE);
+ return NULL;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+ SetPageLocked(page);
+#else
+ SetPageReserved(page);
#endif
- return 0;
+ return page;
}
/*
@@ -116,7 +114,7 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached)
if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
return 0;
- if (noncached)
+ if (noncached)
drm_ttm_cache_flush();
for (i = 0; i < ttm->num_pages; ++i) {
@@ -135,7 +133,7 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached)
if (do_tlbflush)
flush_agp_mappings();
- DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
+ DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
return 0;
}
@@ -154,18 +152,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
if (!ttm)
return 0;
- if (atomic_read(&ttm->vma_count) > 0) {
- ttm->destroy = 1;
- DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
- return -EBUSY;
- }
-
- DRM_DEBUG("Destroying a ttm\n");
-
-#ifdef DRM_TTM_ODD_COMPAT
- BUG_ON(!list_empty(&ttm->vma_list));
- BUG_ON(!list_empty(&ttm->p_mm_list));
-#endif
be = ttm->be;
if (be) {
be->destroy(be);
@@ -193,11 +179,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
DRM_ERROR("Erroneous map count. "
"Leaking page mappings.\n");
}
-
- /*
- * End debugging.
- */
-
__free_page(*cur_page);
drm_free_memctl(PAGE_SIZE);
--bm->cur_pages;
@@ -210,37 +191,36 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
return 0;
}
+struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index)
+{
+ struct page *p;
+ drm_buffer_manager_t *bm = &ttm->dev->bm;
+
+ p = ttm->pages[index];
+ if (!p) {
+ p = drm_ttm_alloc_page();
+ if (!p)
+ return NULL;
+ ttm->pages[index] = p;
+ ++bm->cur_pages;
+ }
+ return p;
+}
+
static int drm_ttm_populate(drm_ttm_t * ttm)
{
struct page *page;
unsigned long i;
- drm_buffer_manager_t *bm;
drm_ttm_backend_t *be;
if (ttm->state != ttm_unpopulated)
return 0;
- bm = &ttm->dev->bm;
be = ttm->be;
for (i = 0; i < ttm->num_pages; ++i) {
- page = ttm->pages[i];
- if (!page) {
- if (drm_alloc_memctl(PAGE_SIZE)) {
- return -ENOMEM;
- }
- page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
- if (!page) {
- drm_free_memctl(PAGE_SIZE);
- return -ENOMEM;
- }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
- SetPageLocked(page);
-#else
- SetPageReserved(page);
-#endif
- ttm->pages[i] = page;
- ++bm->cur_pages;
- }
+ page = drm_ttm_get_page(ttm, i);
+ if (!page)
+ return -ENOMEM;
}
be->populate(be, ttm->num_pages, ttm->pages);
ttm->state = ttm_unbound;
@@ -251,7 +231,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
* Initialize a ttm.
*/
-static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
+drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size)
{
drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
drm_ttm_t *ttm;
@@ -263,11 +243,6 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
if (!ttm)
return NULL;
-#ifdef DRM_ODD_MM_COMPAT
- INIT_LIST_HEAD(&ttm->p_mm_list);
- INIT_LIST_HEAD(&ttm->vma_list);
-#endif
-
ttm->dev = dev;
atomic_set(&ttm->vma_count, 0);
@@ -300,29 +275,20 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
* Unbind a ttm region from the aperture.
*/
-int drm_evict_ttm(drm_ttm_t * ttm)
+void drm_ttm_evict(drm_ttm_t * ttm)
{
drm_ttm_backend_t *be = ttm->be;
int ret;
- switch (ttm->state) {
- case ttm_bound:
- if (be->needs_ub_cache_adjust(be)) {
- ret = unmap_vma_pages(ttm);
- if (ret) {
- return ret;
- }
- }
- be->unbind(be);
- break;
- default:
- break;
+ if (ttm->state == ttm_bound) {
+ ret = be->unbind(be);
+ BUG_ON(ret);
}
+
ttm->state = ttm_evicted;
- return 0;
}
-void drm_fixup_ttm_caching(drm_ttm_t * ttm)
+void drm_ttm_fixup_caching(drm_ttm_t * ttm)
{
if (ttm->state == ttm_evicted) {
@@ -334,18 +300,12 @@ void drm_fixup_ttm_caching(drm_ttm_t * ttm)
}
}
-int drm_unbind_ttm(drm_ttm_t * ttm)
+void drm_ttm_unbind(drm_ttm_t * ttm)
{
- int ret = 0;
-
if (ttm->state == ttm_bound)
- ret = drm_evict_ttm(ttm);
-
- if (ret)
- return ret;
+ drm_ttm_evict(ttm);
- drm_fixup_ttm_caching(ttm);
- return 0;
+ drm_ttm_fixup_caching(ttm);
}
int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
@@ -364,26 +324,13 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
ret = drm_ttm_populate(ttm);
if (ret)
return ret;
- if (ttm->state == ttm_unbound && !cached) {
- ret = unmap_vma_pages(ttm);
- if (ret)
- return ret;
+ if (ttm->state == ttm_unbound && !cached) {
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
-#ifdef DRM_ODD_MM_COMPAT
- else if (ttm->state == ttm_evicted && !cached) {
- ret = drm_ttm_lock_mm(ttm);
- if (ret)
- return ret;
- }
-#endif
+
if ((ret = be->bind(be, aper_offset, cached))) {
ttm->state = ttm_evicted;
-#ifdef DRM_ODD_MM_COMPAT
- if (be->needs_ub_cache_adjust(be))
- drm_ttm_unlock_mm(ttm);
-#endif
DRM_ERROR("Couldn't bind backend.\n");
return ret;
}
@@ -391,130 +338,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
ttm->aper_offset = aper_offset;
ttm->state = ttm_bound;
-#ifdef DRM_ODD_MM_COMPAT
- if (be->needs_ub_cache_adjust(be)) {
- ret = drm_ttm_remap_bound(ttm);
- if (ret)
- return ret;
- }
-#endif
-
return 0;
}
-/*
- * dev->struct_mutex locked.
- */
-static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
-{
- drm_map_list_t *list = &object->map_list;
- drm_local_map_t *map;
-
- if (list->user_token)
- drm_ht_remove_item(&dev->map_hash, &list->hash);
-
- if (list->file_offset_node) {
- drm_mm_put_block(list->file_offset_node);
- list->file_offset_node = NULL;
- }
-
- map = list->map;
-
- if (map) {
- drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
- if (ttm) {
- if (drm_destroy_ttm(ttm) != -EBUSY) {
- drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
- }
- } else {
- drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
- }
- }
-
- drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
-}
-
-void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
-{
- if (atomic_dec_and_test(&to->usage)) {
- drm_ttm_object_remove(dev, to);
- }
-}
-
-void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
-{
- if (atomic_dec_and_test(&to->usage)) {
- mutex_lock(&dev->struct_mutex);
- if (atomic_read(&to->usage) == 0)
- drm_ttm_object_remove(dev, to);
- mutex_unlock(&dev->struct_mutex);
- }
-}
-
-/*
- * Create a ttm and add it to the drm book-keeping.
- * dev->struct_mutex locked.
- */
-
-int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
- uint32_t flags, drm_ttm_object_t ** ttm_object)
-{
- drm_ttm_object_t *object;
- drm_map_list_t *list;
- drm_local_map_t *map;
- drm_ttm_t *ttm;
-
- object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
- if (!object)
- return -ENOMEM;
- object->flags = flags;
- list = &object->map_list;
-
- list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
- if (!list->map) {
- drm_ttm_object_remove(dev, object);
- return -ENOMEM;
- }
- map = list->map;
-
- ttm = drm_init_ttm(dev, size);
- if (!ttm) {
- DRM_ERROR("Could not create ttm\n");
- drm_ttm_object_remove(dev, object);
- return -ENOMEM;
- }
-
- map->offset = (unsigned long)ttm;
- map->type = _DRM_TTM;
- map->flags = _DRM_REMOVABLE;
- map->size = ttm->num_pages * PAGE_SIZE;
- map->handle = (void *)object;
-
- /*
- * Add a one-page "hole" to the block size to avoid the mm subsystem
- * merging vmas.
- * FIXME: Is this really needed?
- */
-
- list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
- ttm->num_pages + 1, 0, 0);
- if (!list->file_offset_node) {
- drm_ttm_object_remove(dev, object);
- return -ENOMEM;
- }
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- ttm->num_pages + 1, 0);
-
- list->hash.key = list->file_offset_node->start;
-
- if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
- drm_ttm_object_remove(dev, object);
- return -ENOMEM;
- }
-
- list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
- ttm->mapping_offset = list->hash.key;
- atomic_set(&object->usage, 1);
- *ttm_object = object;
- return 0;
-}
+EXPORT_SYMBOL(drm_bind_ttm);
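
With the drm_ttm_object layer gone, a ttm is now driven directly by the
buffer-object code. Lifecycle sketch using the functions this file
exports (error paths abbreviated):

    drm_ttm_t *ttm;
    int ret;

    ttm = drm_ttm_init(dev, size);       /* pages are allocated lazily */
    if (!ttm)
            return -ENOMEM;
    ret = drm_bind_ttm(ttm, 0, aper_offset);  /* populate, flush caches and
                                               * bind uncached into the GART */
    if (ret)
            return ret;
    /* ... aperture range in use by the GPU ... */
    drm_ttm_unbind(ttm);     /* evict and fix up caching state */
    drm_destroy_ttm(ttm);    /* free pages and destroy the backend */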
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
deleted file mode 100644
index 796f2317..00000000
--- a/linux-core/drm_ttm.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef _DRM_TTM_H
-#define _DRM_TTM_H
-#define DRM_HAS_TTM
-
-/*
- * The backend GART interface. (In our case AGP). Any similar type of device (PCIE?)
- * needs only to implement these functions to be usable with the "TTM" interface.
- * The AGP backend implementation lives in drm_agpsupport.c
- * basically maps these calls to available functions in agpgart. Each drm device driver gets an
- * additional function pointer that creates these types,
- * so that the device can choose the correct aperture.
- * (Multiple AGP apertures, etc.)
- * Most device drivers will let this point to the standard AGP implementation.
- */
-
-#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
-#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
-#define DRM_BE_FLAG_CBA 0x00000004
-
-typedef struct drm_ttm_backend {
- unsigned long aperture_base;
- void *private;
- uint32_t flags;
- uint32_t drm_map_type;
- int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
- int (*populate) (struct drm_ttm_backend * backend,
- unsigned long num_pages, struct page ** pages);
- void (*clear) (struct drm_ttm_backend * backend);
- int (*bind) (struct drm_ttm_backend * backend,
- unsigned long offset, int cached);
- int (*unbind) (struct drm_ttm_backend * backend);
- void (*destroy) (struct drm_ttm_backend * backend);
-} drm_ttm_backend_t;
-
-typedef struct drm_ttm {
- struct page **pages;
- uint32_t page_flags;
- unsigned long num_pages;
- unsigned long aper_offset;
- atomic_t vma_count;
- struct drm_device *dev;
- int destroy;
- uint32_t mapping_offset;
- drm_ttm_backend_t *be;
- enum {
- ttm_bound,
- ttm_evicted,
- ttm_unbound,
- ttm_unpopulated,
- } state;
-#ifdef DRM_ODD_MM_COMPAT
- struct list_head vma_list;
- struct list_head p_mm_list;
-#endif
-
-} drm_ttm_t;
-
-typedef struct drm_ttm_object {
- atomic_t usage;
- uint32_t flags;
- drm_map_list_t map_list;
-} drm_ttm_object_t;
-
-extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
- uint32_t flags,
- drm_ttm_object_t ** ttm_object);
-extern void drm_ttm_object_deref_locked(struct drm_device *dev,
- drm_ttm_object_t * to);
-extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
- drm_ttm_object_t * to);
-extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
- uint32_t handle,
- int check_owner);
-extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
-
-extern int drm_unbind_ttm(drm_ttm_t * ttm);
-
-/*
- * Evict a ttm region. Keeps Aperture caching policy.
- */
-
-extern int drm_evict_ttm(drm_ttm_t * ttm);
-extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
-
-/*
- * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
- * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
- * when the last vma exits.
- */
-
-extern int drm_destroy_ttm(drm_ttm_t * ttm);
-extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
-
-static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
-{
- return (drm_ttm_t *) to->map_list.map->offset;
-}
-
-#define DRM_MASK_VAL(dest, mask, val) \
- (dest) = ((dest) & ~(mask)) | ((val) & (mask));
-
-#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
-#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
-
-/*
- * Page flags.
- */
-
-#define DRM_TTM_PAGE_UNCACHED 0x01
-#define DRM_TTM_PAGE_USED 0x02
-#define DRM_TTM_PAGE_BOUND 0x04
-#define DRM_TTM_PAGE_PRESENT 0x08
-#define DRM_TTM_PAGE_VMALLOC 0x10
-
-#endif
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 827a7bdb..f3b1088f 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -41,9 +41,9 @@
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
-static void drm_vm_ttm_close(struct vm_area_struct *vma);
-static int drm_vm_ttm_open(struct vm_area_struct *vma);
-static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);
+static int drm_bo_mmap_locked(struct vm_area_struct *vma,
+ struct file *filp,
+ drm_local_map_t *map);
pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
@@ -159,96 +159,6 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \
- LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
-static
-#endif
-struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
- struct fault_data *data)
-{
- unsigned long address = data->address;
- drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
- unsigned long page_offset;
- struct page *page;
- drm_ttm_t *ttm;
- drm_buffer_manager_t *bm;
- drm_device_t *dev;
- unsigned long pfn;
- int err;
- pgprot_t pgprot;
-
- if (!map) {
- data->type = VM_FAULT_OOM;
- return NULL;
- }
-
- if (address > vma->vm_end) {
- data->type = VM_FAULT_SIGBUS;
- return NULL;
- }
-
- ttm = (drm_ttm_t *) map->offset;
-
- dev = ttm->dev;
-
- /*
- * Perhaps retry here?
- */
-
- mutex_lock(&dev->struct_mutex);
- drm_fixup_ttm_caching(ttm);
-
- bm = &dev->bm;
- page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
- page = ttm->pages[page_offset];
-
- if (!page) {
- if (drm_alloc_memctl(PAGE_SIZE)) {
- data->type = VM_FAULT_OOM;
- goto out;
- }
- page = ttm->pages[page_offset] =
- alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
- if (!page) {
- drm_free_memctl(PAGE_SIZE);
- data->type = VM_FAULT_OOM;
- goto out;
- }
- ++bm->cur_pages;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
- SetPageLocked(page);
-#else
- SetPageReserved(page);
-#endif
- }
-
- if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
-
- /*
- * FIXME: Check can't map aperture flag.
- */
-
- pfn = ttm->aper_offset + page_offset +
- (ttm->be->aperture_base >> PAGE_SHIFT);
- pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
- } else {
- pfn = page_to_pfn(page);
- pgprot = vma->vm_page_prot;
- }
-
- err = vm_insert_pfn(vma, address, pfn, pgprot);
-
- if (!err || err == -EBUSY)
- data->type = VM_FAULT_MINOR;
- else
- data->type = VM_FAULT_OOM;
- out:
- mutex_unlock(&dev->struct_mutex);
- return NULL;
-}
-#endif
-
/**
* \c nopage method for shared virtual memory.
*
@@ -508,20 +418,6 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
-static struct vm_operations_struct drm_vm_ttm_ops = {
- .nopage = drm_vm_ttm_nopage,
- .open = drm_vm_ttm_open_wrapper,
- .close = drm_vm_ttm_close,
-};
-#else
-static struct vm_operations_struct drm_vm_ttm_ops = {
- .fault = drm_vm_ttm_fault,
- .open = drm_vm_ttm_open_wrapper,
- .close = drm_vm_ttm_close,
-};
-#endif
-
/**
* \c open method for shared virtual memory.
*
@@ -530,7 +426,7 @@ static struct vm_operations_struct drm_vm_ttm_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
-static void drm_vm_open(struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
@@ -542,36 +438,21 @@ static void drm_vm_open(struct vm_area_struct *vma)
vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
- mutex_lock(&dev->struct_mutex);
vma_entry->vma = vma;
vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
dev->vmalist = vma_entry;
- mutex_unlock(&dev->struct_mutex);
}
}
-static int drm_vm_ttm_open(struct vm_area_struct *vma) {
-
- drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
- drm_ttm_t *ttm;
+static void drm_vm_open(struct vm_area_struct *vma)
+{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
- drm_vm_open(vma);
mutex_lock(&dev->struct_mutex);
- ttm = (drm_ttm_t *) map->offset;
- atomic_inc(&ttm->vma_count);
-#ifdef DRM_ODD_MM_COMPAT
- drm_ttm_add_vma(ttm, vma);
-#endif
+ drm_vm_open_locked(vma);
mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
-{
- drm_vm_ttm_open(vma);
}
/**
@@ -608,34 +489,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
}
-static void drm_vm_ttm_close(struct vm_area_struct *vma)
-{
- drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
- drm_ttm_t *ttm;
- drm_device_t *dev;
- int ret;
-
- drm_vm_close(vma);
- if (map) {
- ttm = (drm_ttm_t *) map->offset;
- dev = ttm->dev;
- mutex_lock(&dev->struct_mutex);
-#ifdef DRM_ODD_MM_COMPAT
- drm_ttm_delete_vma(ttm, vma);
-#endif
- if (atomic_dec_and_test(&ttm->vma_count)) {
- if (ttm->destroy) {
- ret = drm_destroy_ttm(ttm);
- BUG_ON(ret);
- drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
- }
- }
- mutex_unlock(&dev->struct_mutex);
- }
- return;
-}
-
-
/**
* mmap DMA memory.
*
@@ -653,7 +506,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
drm_device_dma_t *dma;
unsigned long length = vma->vm_end - vma->vm_start;
- lock_kernel();
dev = priv->head->dev;
dma = dev->dma;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@@ -661,10 +513,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
/* Length must match exact page count */
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
- unlock_kernel();
return -EINVAL;
}
- unlock_kernel();
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
@@ -686,7 +536,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open(vma);
+ drm_vm_open_locked(vma);
return 0;
}
@@ -719,7 +569,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
* according to the mapping type and remaps the pages. Finally sets the file
* pointer and calls vm_open().
*/
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
@@ -737,6 +587,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
* the AGP mapped at physical address 0
* --BenH.
*/
+
if (!vma->vm_pgoff
#if __OS_HAS_AGP
&& (!dev->agp
@@ -833,27 +684,254 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = (void *)map;
vma->vm_flags |= VM_RESERVED;
break;
- case _DRM_TTM: {
- vma->vm_ops = &drm_vm_ttm_ops;
- vma->vm_private_data = (void *) map;
- vma->vm_file = filp;
- vma->vm_flags |= VM_RESERVED | VM_IO;
-#ifdef DRM_ODD_MM_COMPAT
- mutex_lock(&dev->struct_mutex);
- drm_ttm_map_bound(vma);
- mutex_unlock(&dev->struct_mutex);
-#endif
- if (drm_vm_ttm_open(vma))
- return -EAGAIN;
- return 0;
- }
+ case _DRM_TTM:
+ return drm_bo_mmap_locked(vma, filp, map);
default:
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_file = filp; /* Needed for drm_vm_open() */
- drm_vm_open(vma);
+ drm_vm_open_locked(vma);
return 0;
}
+
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ drm_file_t *priv = filp->private_data;
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_mmap_locked(filp, vma);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL(drm_mmap);
+
+/**
+ * buffer object vm functions.
+ */
+
+/**
+ * \c Pagefault method for buffer objects.
+ *
+ * \param vma Virtual memory area.
+ * \param data Fault data on failure or refault.
+ * \return Always NULL as we insert pfns directly.
+ *
+ * It's important that pfns are inserted while holding the bo->mutex lock;
+ * otherwise we might race with unmap_mapping_range(), which is always
+ * called with the bo->mutex lock held.
+ *
+ * It's not pretty to modify the vma->vm_page_prot variable while not
+ * holding the mm semaphore in write mode. However, we have it in read mode,
+ * so we won't be racing with any other writers, and we only actually modify
+ * it when no ptes are present, so it shouldn't be a big deal.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19) || \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+#ifdef DRM_FULL_MM_COMPAT
+static
+#endif
+struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+ struct fault_data *data)
+{
+ unsigned long address = data->address;
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ unsigned long page_offset;
+ struct page *page = NULL;
+ drm_ttm_t *ttm;
+ drm_device_t *dev;
+ unsigned long pfn;
+ int err;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+
+
+ mutex_lock(&bo->mutex);
+
+ err = drm_bo_wait(bo, 0, 0, 0);
+ if (err) {
+ data->type = (err == -EAGAIN) ?
+ VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+
+ /*
+ * If buffer happens to be in a non-mappable location,
+ * move it to a mappable.
+ */
+
+#ifdef DRM_BO_FULL_COMPAT
+ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+ uint32_t new_mask = bo->mem.mask |
+ DRM_BO_FLAG_MAPPABLE |
+ DRM_BO_FLAG_FORCE_MAPPABLE;
+ err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+
+ if (err) {
+ data->type = (err == -EAGAIN) ?
+ VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ }
+#else
+ if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+ unsigned long _end = jiffies + 3*DRM_HZ;
+ uint32_t new_mask = bo->mem.mask |
+ DRM_BO_FLAG_MAPPABLE |
+ DRM_BO_FLAG_FORCE_MAPPABLE;
+
+ do {
+ err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+ } while((err == -EAGAIN) && !time_after_eq(jiffies, _end));
+
+ if (err) {
+ DRM_ERROR("Timeout moving buffer to mappable location.\n");
+ data->type = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ }
+#endif
+
+ if (address > vma->vm_end) {
+ data->type = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ dev = bo->dev;
+ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+ &bus_size);
+
+ if (err) {
+ data->type = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+
+ if (bus_size) {
+ drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
+
+ pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
+ vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
+ } else {
+ ttm = bo->ttm;
+
+ drm_ttm_fixup_caching(ttm);
+ page = drm_ttm_get_page(ttm, page_offset);
+ if (!page) {
+ data->type = VM_FAULT_OOM;
+ goto out_unlock;
+ }
+ pfn = page_to_pfn(page);
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ err = vm_insert_pfn(vma, address, pfn);
+
+ if (!err || err == -EBUSY)
+ data->type = VM_FAULT_MINOR;
+ else
+ data->type = VM_FAULT_OOM;
+out_unlock:
+ mutex_unlock(&bo->mutex);
+ return NULL;
+}
+#endif
+
+static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
+{
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+
+ drm_vm_open_locked(vma);
+ atomic_inc(&bo->usage);
+#ifdef DRM_ODD_MM_COMPAT
+ drm_bo_add_vma(bo, vma);
+#endif
+}
+
+/**
+ * \c vma open method for buffer objects.
+ *
+ * \param vma virtual memory area.
+ */
+
+static void drm_bo_vm_open(struct vm_area_struct *vma)
+{
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ drm_device_t *dev = bo->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_vm_open_locked(vma);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c vma close method for buffer objects.
+ *
+ * \param vma virtual memory area.
+ */
+
+static void drm_bo_vm_close(struct vm_area_struct *vma)
+{
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ drm_device_t *dev = bo->dev;
+
+ drm_vm_close(vma);
+ if (bo) {
+ mutex_lock(&dev->struct_mutex);
+#ifdef DRM_ODD_MM_COMPAT
+ drm_bo_delete_vma(bo, vma);
+#endif
+ drm_bo_usage_deref_locked(bo);
+ mutex_unlock(&dev->struct_mutex);
+ }
+ return;
+}
+
+static struct vm_operations_struct drm_bo_vm_ops = {
+#ifdef DRM_FULL_MM_COMPAT
+ .fault = drm_bo_vm_fault,
+#else
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+ .nopfn = drm_bo_vm_nopfn,
+#else
+ .nopage = drm_bo_vm_nopage,
+#endif
+#endif
+ .open = drm_bo_vm_open,
+ .close = drm_bo_vm_close,
+};
+
+/**
+ * mmap buffer object memory.
+ *
+ * \param vma virtual memory area.
+ * \param filp file pointer.
+ * \param map The buffer object drm map.
+ * \return zero on success or a negative number on failure.
+ */
+
+int drm_bo_mmap_locked(struct vm_area_struct *vma,
+ struct file *filp,
+ drm_local_map_t *map)
+{
+ vma->vm_ops = &drm_bo_vm_ops;
+ vma->vm_private_data = map->handle;
+ vma->vm_file = filp;
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+ vma->vm_flags |= VM_PFNMAP;
+#endif
+ drm_bo_vm_open_locked(vma);
+#ifdef DRM_ODD_MM_COMPAT
+ drm_bo_map_bound(vma);
+#endif
+ return 0;
+}
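
For completeness, the userspace side of the new _DRM_TTM path is a plain
mmap(2) on the DRM fd, with the buffer object's map handle (the
user_token set up by the buffer-object code) as the file offset.
Illustrative, libdrm-style:

    /* bo_map_handle is the map handle returned for the buffer object */
    void *virt = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, drm_fd, bo_map_handle);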
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index ad4d2fce..41467001 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -125,8 +125,8 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
- VM_OFFSET(vma) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
diff --git a/linux-core/i830_dma.c b/linux-core/i830_dma.c
index e93307fb..406a3ff7 100644
--- a/linux-core/i830_dma.c
+++ b/linux-core/i830_dma.c
@@ -110,7 +110,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
unlock_kernel();
if (remap_pfn_range(vma, vma->vm_start,
- VM_OFFSET(vma) >> PAGE_SHIFT,
+ vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index c3e54468..8797de89 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -33,16 +33,15 @@
#include "i915_drm.h"
#include "i915_drv.h"
-
drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
{
return drm_agp_init_ttm(dev, NULL);
}
-int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
+int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
{
*class = 0;
- if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
@@ -64,3 +63,173 @@ int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
return i915_emit_mi_flush(dev, flush_cmd);
}
+
+int i915_init_mem_type(drm_device_t * dev, uint32_t type,
+ drm_mem_type_manager_t * man)
+{
+ switch (type) {
+ case DRM_BO_MEM_LOCAL:
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CACHED;
+ man->drm_bus_maptype = 0;
+ break;
+ case DRM_BO_MEM_TT:
+ if (!(drm_core_has_AGP(dev) && dev->agp)) {
+ DRM_ERROR("AGP is not enabled for memory type %u\n",
+ (unsigned)type);
+ return -EINVAL;
+ }
+ man->io_offset = dev->agp->agp_info.aper_base;
+ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+ man->io_addr = NULL;
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
+ man->drm_bus_maptype = _DRM_AGP;
+ break;
+ case DRM_BO_MEM_PRIV0:
+ if (!(drm_core_has_AGP(dev) && dev->agp)) {
+ DRM_ERROR("AGP is not enabled for memory type %u\n",
+ (unsigned)type);
+ return -EINVAL;
+ }
+ man->io_offset = dev->agp->agp_info.aper_base;
+ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+ man->io_addr = NULL;
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
+ man->drm_bus_maptype = _DRM_AGP;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+uint32_t i915_evict_mask(drm_buffer_object_t *bo)
+{
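+	/*
+	 * Placement flags to use when this buffer must be evicted: TT and
+	 * local buffers fall back to local memory, everything else is
+	 * pushed out to cached TT.
+	 */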
+ switch (bo->mem.mem_type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+ return DRM_BO_FLAG_MEM_LOCAL;
+ default:
+ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
+ }
+}
+
+static void i915_emit_copy_blit(drm_device_t * dev,
+ uint32_t src_offset,
+ uint32_t dst_offset,
+ uint32_t pages, int direction)
+{
+ uint32_t cur_pages;
+ uint32_t stride = PAGE_SIZE;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ if (!dev_priv)
+ return;
+
+ i915_kernel_lost_context(dev);
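+	/*
+	 * Blit one page per scanline: width and stride are PAGE_SIZE and the
+	 * height field carries the page count. The 2D engine blits at most
+	 * 2048 rows per command, so larger moves are split up. Bit 30 flags
+	 * the copy direction for overlapping moves.
+	 */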
+ while (pages > 0) {
+ cur_pages = pages;
+ if (cur_pages > 2048)
+ cur_pages = 2048;
+ pages -= cur_pages;
+
+ BEGIN_LP_RING(6);
+ OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
+ (1 << 25) | (direction ? (1 << 30) : 0));
+ OUT_RING((cur_pages << 16) | PAGE_SIZE);
+ OUT_RING(dst_offset);
+ OUT_RING(stride & 0xffff);
+ OUT_RING(src_offset);
+ ADVANCE_LP_RING();
+ }
+ return;
+}
+
+static int i915_move_blit(drm_buffer_object_t * bo,
+ int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+ drm_bo_mem_reg_t *old_mem = &bo->mem;
+ int dir = 0;
+
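+	/*
+	 * Use a decrementing blit when source and destination are in the
+	 * same memory type and the ranges may overlap.
+	 */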
+ if ((old_mem->mem_type == new_mem->mem_type) &&
+ (new_mem->mm_node->start <
+ old_mem->mm_node->start + old_mem->mm_node->size)) {
+ dir = 1;
+ }
+
+ i915_emit_copy_blit(bo->dev,
+ old_mem->mm_node->start << PAGE_SHIFT,
+ new_mem->mm_node->start << PAGE_SHIFT,
+ new_mem->num_pages, dir);
+
+ i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
+
+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
+ DRM_FENCE_TYPE_EXE |
+ DRM_I915_FENCE_TYPE_RW,
+ DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
+}
+
+/*
+ * Flip the destination ttm into cached-coherent AGP space,
+ * blit into it, and then move it out to its final placement.
+ */
+
+static int i915_move_flip(drm_buffer_object_t * bo,
+ int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+ drm_device_t *dev = bo->dev;
+ drm_bo_mem_reg_t tmp_mem;
+ int ret;
+
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
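+	/*
+	 * Ask for cached, cache-coherent TT space for the intermediate copy;
+	 * once the blit has landed there, the pages can simply be unbound
+	 * into system memory without another copy.
+	 */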
+
+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
+ if (ret)
+ return ret;
+
+ ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
+ if (ret)
+ goto out_cleanup;
+
+ ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
+ if (ret)
+ goto out_cleanup;
+
+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
+out_cleanup:
+ if (tmp_mem.mm_node) {
+ mutex_lock(&dev->struct_mutex);
+ if (tmp_mem.mm_node != bo->pinned_node)
+ drm_mm_put_block(tmp_mem.mm_node);
+ tmp_mem.mm_node = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ }
+ return ret;
+}
+
+int i915_move(drm_buffer_object_t * bo,
+ int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+{
+ drm_bo_mem_reg_t *old_mem = &bo->mem;
+
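+	/*
+	 * Moves out of local memory go straight to memcpy; the other cases
+	 * try the hardware blit first and fall back to memcpy on failure.
+	 */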
+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ if (i915_move_flip(bo, evict, no_wait, new_mem))
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else {
+ if (i915_move_blit(bo, evict, no_wait, new_mem))
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+ return 0;
+}
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 2c5b43d0..56e5998f 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -40,22 +40,32 @@ static struct pci_device_id pciidlist[] = {
#ifdef I915_HAVE_FENCE
static drm_fence_driver_t i915_fence_driver = {
- .no_types = 2,
+ .num_classes = 1,
.wrap_diff = (1 << 30),
.flush_diff = (1 << 29),
.sequence_mask = 0xffffffffU,
.lazy_capable = 1,
.emit = i915_fence_emit_sequence,
.poke_flush = i915_poke_flush,
+ .has_irq = i915_fence_has_irq,
};
#endif
#ifdef I915_HAVE_BUFFER
+
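+/*
+ * Preferred placement order: PRIV0 (fixed AGP) first, then TT, then
+ * local memory; when space is tight, TT is tried before PRIV0.
+ */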
+static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
+static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
+
static drm_bo_driver_t i915_bo_driver = {
- .iomap = {NULL, NULL},
- .cached = {1, 1},
+ .mem_type_prio = i915_mem_prios,
+ .mem_busy_prio = i915_busy_prios,
+ .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
+ .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
.fence_type = i915_fence_types,
- .invalidate_caches = i915_invalidate_caches
+ .invalidate_caches = i915_invalidate_caches,
+ .init_mem_type = i915_init_mem_type,
+ .evict_mask = i915_evict_mask,
+ .move = i915_move,
};
#endif
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 2182604c..88daa57c 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -42,36 +42,34 @@ static void i915_perform_flush(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_class_manager_t *fc = &fm->class[0];
drm_fence_driver_t *driver = dev->driver->fence_driver;
uint32_t flush_flags = 0;
uint32_t flush_sequence = 0;
uint32_t i_status;
uint32_t diff;
uint32_t sequence;
+ int rwflush;
if (!dev_priv)
return;
- if (fm->pending_exe_flush) {
+ if (fc->pending_exe_flush) {
sequence = READ_BREADCRUMB(dev_priv);
/*
* First update fences with the current breadcrumb.
*/
- diff = sequence - fm->last_exe_flush;
+ diff = sequence - fc->last_exe_flush;
if (diff < driver->wrap_diff && diff != 0) {
- drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
+ drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
}
- diff = sequence - fm->exe_flush_sequence;
- if (diff < driver->wrap_diff) {
- fm->pending_exe_flush = 0;
- if (dev_priv->fence_irq_on) {
- i915_user_irq_off(dev_priv);
- dev_priv->fence_irq_on = 0;
- }
- } else if (!dev_priv->fence_irq_on) {
+ if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
+ i915_user_irq_off(dev_priv);
+ dev_priv->fence_irq_on = 0;
+ } else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
i915_user_irq_on(dev_priv);
dev_priv->fence_irq_on = 1;
}
@@ -84,17 +82,18 @@ static void i915_perform_flush(drm_device_t * dev)
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
- drm_fence_handler(dev, flush_sequence, flush_flags);
+ drm_fence_handler(dev, 0, flush_sequence, flush_flags);
}
}
- if (fm->pending_flush && !dev_priv->flush_pending) {
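+	/*
+	 * Only the read/write flush type is serviced with the INSTPM write
+	 * below; the EXE type is handled by the breadcrumb path above.
+	 */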
+ rwflush = fc->pending_flush & DRM_I915_FENCE_TYPE_RW;
+ if (rwflush && !dev_priv->flush_pending) {
dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
- dev_priv->flush_flags = fm->pending_flush;
+ dev_priv->flush_flags = fc->pending_flush;
dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
dev_priv->flush_pending = 1;
- fm->pending_flush = 0;
+ fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
}
if (dev_priv->flush_pending) {
@@ -104,13 +103,13 @@ static void i915_perform_flush(drm_device_t * dev)
flush_flags = dev_priv->flush_flags;
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
- drm_fence_handler(dev, flush_sequence, flush_flags);
+ drm_fence_handler(dev, 0, flush_sequence, flush_flags);
}
}
}
-void i915_poke_flush(drm_device_t * dev)
+void i915_poke_flush(drm_device_t * dev, uint32_t class)
{
drm_fence_manager_t *fm = &dev->fm;
unsigned long flags;
@@ -120,7 +119,7 @@ void i915_poke_flush(drm_device_t * dev)
write_unlock_irqrestore(&fm->lock, flags);
}
-int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
+int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
uint32_t * sequence, uint32_t * native_type)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -144,3 +143,15 @@ void i915_fence_handler(drm_device_t * dev)
i915_perform_flush(dev);
write_unlock(&fm->lock);
}
+
+int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags)
+{
+ /*
+ * We have an irq that tells us when we have a new breadcrumb.
+ */
+
+ if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
+ return 1;
+
+ return 0;
+}
diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c
index 9b0b9830..114ec8f9 100644
--- a/linux-core/sis_drv.c
+++ b/linux-core/sis_drv.c
@@ -74,7 +74,7 @@ static struct drm_driver driver = {
.context_dtor = NULL,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
- .reclaim_buffers_locked = sis_reclaim_buffers_locked,
+ .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
new file mode 100644
index 00000000..f156ee6d
--- /dev/null
+++ b/linux-core/via_buffer.c
@@ -0,0 +1,163 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+
+drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev)
+{
+ return drm_agp_init_ttm(dev, NULL);
+}
+
+int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
+{
+ *class = 0;
+ *type = 3;
+ return 0;
+}
+
+int via_invalidate_caches(drm_device_t * dev, uint32_t flags)
+{
+ /*
+ * FIXME: Invalidate texture caches here.
+ */
+
+ return 0;
+}
+
+
+static int via_vram_info(drm_device_t *dev,
+ unsigned long *offset,
+ unsigned long *size)
+{
+ struct pci_dev *pdev = dev->pdev;
+ unsigned long flags;
+
+ int ret = DRM_ERR(EINVAL);
+ int i;
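+	/*
+	 * Scan the six PCI BARs for the first prefetchable memory resource,
+	 * which is taken to be the video RAM aperture.
+	 */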
+	for (i = 0; i < 6; ++i) {
+ flags = pci_resource_flags(pdev, i);
+ if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) ==
+ (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret) {
+ DRM_ERROR("Could not find VRAM PCI resource\n");
+ return ret;
+ }
+
+ *offset = pci_resource_start(pdev, i);
+ *size = pci_resource_end(pdev, i) - *offset + 1;
+ return 0;
+}
+
+int via_init_mem_type(drm_device_t * dev, uint32_t type,
+ drm_mem_type_manager_t * man)
+{
+ switch (type) {
+ case DRM_BO_MEM_LOCAL:
+ /* System memory */
+
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CACHED;
+ man->drm_bus_maptype = 0;
+ break;
+
+ case DRM_BO_MEM_TT:
+ /* Dynamic agpgart memory */
+
+ if (!(drm_core_has_AGP(dev) && dev->agp)) {
+ DRM_ERROR("AGP is not enabled for memory type %u\n",
+ (unsigned)type);
+ return -EINVAL;
+ }
+ man->io_offset = dev->agp->agp_info.aper_base;
+ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+ man->io_addr = NULL;
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
+
+ /* Only to get pte protection right. */
+
+ man->drm_bus_maptype = _DRM_AGP;
+ break;
+
+ case DRM_BO_MEM_VRAM:
+ /* "On-card" video ram */
+
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
+ man->drm_bus_maptype = _DRM_FRAME_BUFFER;
+ man->io_addr = NULL;
+ return via_vram_info(dev, &man->io_offset, &man->io_size);
+
+ case DRM_BO_MEM_PRIV0:
+ /* Pre-bound agpgart memory */
+
+ if (!(drm_core_has_AGP(dev) && dev->agp)) {
+ DRM_ERROR("AGP is not enabled for memory type %u\n",
+ (unsigned)type);
+ return -EINVAL;
+ }
+ man->io_offset = dev->agp->agp_info.aper_base;
+ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+ man->io_addr = NULL;
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
+ man->drm_bus_maptype = _DRM_AGP;
+ break;
+
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+uint32_t via_evict_mask(drm_buffer_object_t *bo)
+{
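+	/*
+	 * Note that big VRAM buffers (more than 128 pages) are evicted to
+	 * the TT aperture, while small ones go directly to system memory.
+	 */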
+ switch (bo->mem.mem_type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+		return DRM_BO_FLAG_MEM_LOCAL; /* Evict TT to local */
+	case DRM_BO_MEM_PRIV0: /* Evict pre-bound AGP to TT */
+		return DRM_BO_FLAG_MEM_TT;
+	case DRM_BO_MEM_VRAM:
+		if (bo->mem.num_pages > 128)
+			return DRM_BO_FLAG_MEM_TT;
+		else
+			return DRM_BO_FLAG_MEM_LOCAL;
+	default:
+		return DRM_BO_FLAG_MEM_LOCAL;
+ }
+}
diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c
new file mode 100644
index 00000000..02249939
--- /dev/null
+++ b/linux-core/via_fence.c
@@ -0,0 +1,230 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+
+/*
+ * DRM_FENCE_TYPE_EXE guarantees that all command buffers can be evicted.
+ * DRM_VIA_FENCE_TYPE_ACCEL guarantees that all 2D & 3D rendering is complete.
+ */
+
+
+static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_fence_class_manager_t *fc = &dev->fm.class[class];
+ uint32_t pending_flush_types = 0;
+ uint32_t signaled_flush_types = 0;
+ uint32_t status;
+
+ if (class != 0)
+ return 0;
+
+ if (!dev_priv)
+ return 0;
+
+ spin_lock(&dev_priv->fence_lock);
+
+ pending_flush_types = fc->pending_flush |
+ ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+
+ if (pending_flush_types) {
+
+		/*
+		 * Take the idlelock. The next client that tries to grab the
+		 * hardware lock will then stall until the idlelock is
+		 * released, which guarantees that the GPU engines eventually
+		 * go idle, but nothing more. It cannot be used to protect
+		 * the hardware.
+		 */
+
+ if (!dev_priv->have_idlelock) {
+ drm_idlelock_take(&dev->lock);
+ dev_priv->have_idlelock = 1;
+ }
+
+ /*
+ * Check if AGP command reader is idle.
+ */
+
+ if (pending_flush_types & DRM_FENCE_TYPE_EXE)
+ if (VIA_READ(0x41C) & 0x80000000)
+ signaled_flush_types |= DRM_FENCE_TYPE_EXE;
+
+ /*
+ * Check VRAM command queue empty and 2D + 3D engines idle.
+ */
+
+ if (pending_flush_types & DRM_VIA_FENCE_TYPE_ACCEL) {
+ status = VIA_READ(VIA_REG_STATUS);
+ if ((status & VIA_VR_QUEUE_BUSY) &&
+ !(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)))
+ signaled_flush_types |= DRM_VIA_FENCE_TYPE_ACCEL;
+ }
+
+ if (signaled_flush_types) {
+ pending_flush_types &= ~signaled_flush_types;
+ if (!pending_flush_types && dev_priv->have_idlelock) {
+ drm_idlelock_release(&dev->lock);
+ dev_priv->have_idlelock = 0;
+ }
+ drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, signaled_flush_types);
+ }
+ }
+
+ spin_unlock(&dev_priv->fence_lock);
+
+ return fc->pending_flush |
+ ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+}
+
+
+/**
+ * Emit a fence sequence.
+ */
+
+int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
+ uint32_t * sequence, uint32_t * native_type)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ int ret = 0;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ switch(class) {
+ case 0: /* AGP command stream */
+
+ /*
+ * The sequence number isn't really used by the hardware yet.
+ */
+
+ spin_lock(&dev_priv->fence_lock);
+ *sequence = ++dev_priv->emit_0_sequence;
+ spin_unlock(&dev_priv->fence_lock);
+
+		/*
+		 * When drm_fence_handler() is called with flush type 0x01 and
+		 * a sequence number, it means only that the EXE flag has
+		 * expired: no implicit flushing, and no other engines idle.
+		 */
+
+ *native_type = DRM_FENCE_TYPE_EXE;
+ break;
+ default:
+ ret = DRM_ERR(EINVAL);
+ break;
+ }
+ return ret;
+}
+
+/**
+ * Manual poll (from the fence manager).
+ */
+
+void via_poke_flush(drm_device_t * dev, uint32_t class)
+{
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_fence_manager_t *fm = &dev->fm;
+ unsigned long flags;
+ uint32_t pending_flush;
+
+ if (!dev_priv)
+		return;
+
+ write_lock_irqsave(&fm->lock, flags);
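+	/*
+	 * Flush twice: the first pass may just have taken the idlelock, so
+	 * the second pass gets another look at the engine status before we
+	 * fall back to the timer below.
+	 */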
+ pending_flush = via_perform_flush(dev, class);
+ if (pending_flush)
+ pending_flush = via_perform_flush(dev, class);
+ write_unlock_irqrestore(&fm->lock, flags);
+
+ /*
+ * Kick the timer if there are more flushes pending.
+ */
+
+ if (pending_flush && !timer_pending(&dev_priv->fence_timer)) {
+ dev_priv->fence_timer.expires = jiffies + 1;
+ add_timer(&dev_priv->fence_timer);
+ }
+}
+
+/**
+ * No irq-driven fence expirations are implemented yet. Although both the
+ * HQV engines and the PCI dmablit engine signal idle with an IRQ, we do
+ * not use those interrupts here, so the drm fence manager will always
+ * poll for engine idle unless the caller waiting on a fence object has
+ * requested a lazy wait.
+ */
+
+int via_fence_has_irq(struct drm_device * dev, uint32_t class,
+ uint32_t flags)
+{
+ return 0;
+}
+
+/**
+ * Periodically call the flush function. This is what makes lazy waits
+ * work, and thus lets us set lazy_capable. A lazy wait does not care
+ * exactly when the fence expires, so a one-tick timer delay is fine.
+ */
+
+void via_fence_timer(unsigned long data)
+{
+ drm_device_t *dev = (drm_device_t *) data;
+ drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ drm_fence_manager_t *fm = &dev->fm;
+ uint32_t pending_flush;
+ drm_fence_class_manager_t *fc = &dev->fm.class[0];
+
+ if (!dev_priv)
+ return;
+	if (!fm->initialized)
+		return;
+
+ via_poke_flush(dev, 0);
+ pending_flush = fc->pending_flush |
+ ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+
+	/*
+	 * Disable the timer if there are no more flushes pending.
+	 */
+
+ if (!pending_flush && timer_pending(&dev_priv->fence_timer)) {
+ BUG_ON(dev_priv->have_idlelock);
+ del_timer(&dev_priv->fence_timer);
+ }
+	return;
+}