author     Nian Wu <nian.wu@intel.com>  2006-11-09 21:21:17 -0800
committer  Nian Wu <nian.wu@intel.com>  2006-11-09 21:21:17 -0800
commit     14e3f2711e90fe9a9c315d96abd4c7681539936a (patch)
tree       6a562d9035e56671eb7905bac1f3829597f109a9 /linux-core
parent     e2ea72187e470c2c13adbd3fba4177bd4a0ecc37 (diff)
parent     d51e1bb56ca2f7858cdeac6f61a7b747c1e15b1e (diff)
Merge branch 'master' into crestline
Conflicts: shared-core/i915_dma.c
Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/Makefile | 4
-rw-r--r--  linux-core/Makefile.kernel | 6
-rw-r--r--  linux-core/drmP.h | 527
-rw-r--r--  linux-core/drm_agpsupport.c | 158
-rw-r--r--  linux-core/drm_bo.c | 1998
-rw-r--r--  linux-core/drm_bufs.c | 47
-rw-r--r--  linux-core/drm_compat.c | 434
-rw-r--r--  linux-core/drm_compat.h | 149
-rw-r--r--  linux-core/drm_context.c | 8
-rw-r--r--  linux-core/drm_core.h | 8
-rw-r--r--  linux-core/drm_drawable.c | 56
-rw-r--r--  linux-core/drm_drv.c | 114
-rw-r--r--  linux-core/drm_fence.c | 619
-rw-r--r--  linux-core/drm_fops.c | 155
-rw-r--r--  linux-core/drm_hashtab.c | 49
-rw-r--r--  linux-core/drm_hashtab.h | 1
-rw-r--r--  linux-core/drm_ioctl.c | 22
-rw-r--r--  linux-core/drm_irq.c | 170
-rw-r--r--  linux-core/drm_lock.c | 96
-rw-r--r--  linux-core/drm_memory.c | 70
-rw-r--r--  linux-core/drm_memory.h | 1
-rw-r--r--  linux-core/drm_memory_debug.c | 1
-rw-r--r--  linux-core/drm_memory_debug.h | 1
-rw-r--r--  linux-core/drm_mm.c | 183
-rw-r--r--  linux-core/drm_object.c | 287
-rw-r--r--  linux-core/drm_pci.c | 18
-rw-r--r--  linux-core/drm_proc.c | 103
-rw-r--r--  linux-core/drm_scatter.c | 1
-rw-r--r--  linux-core/drm_sman.c | 5
-rw-r--r--  linux-core/drm_stub.c | 39
-rw-r--r--  linux-core/drm_sysfs.c | 1
-rw-r--r--  linux-core/drm_ttm.c | 519
-rw-r--r--  linux-core/drm_ttm.h | 145
-rw-r--r--  linux-core/drm_vm.c | 275
-rw-r--r--  linux-core/ffb_drv.c | 1
-rw-r--r--  linux-core/i810_dma.c | 10
-rw-r--r--  linux-core/i810_drv.c | 1
-rw-r--r--  linux-core/i810_drv.h | 20
-rw-r--r--  linux-core/i830_dma.c | 4
-rw-r--r--  linux-core/i830_drv.c | 2
-rw-r--r--  linux-core/i915_buffer.c | 66
-rw-r--r--  linux-core/i915_drv.c | 32
-rw-r--r--  linux-core/i915_fence.c | 146
-rw-r--r--  linux-core/imagine_drv.c | 1
-rw-r--r--  linux-core/mach64_drv.c | 1
-rw-r--r--  linux-core/mga_drv.c | 2
-rw-r--r--  linux-core/nv_drv.c | 1
-rw-r--r--  linux-core/r128_drv.c | 1
-rw-r--r--  linux-core/radeon_drv.c | 3
-rw-r--r--  linux-core/savage_drv.c | 1
-rw-r--r--  linux-core/sis_drv.c | 15
-rw-r--r--  linux-core/tdfx_drv.c | 1
-rw-r--r--  linux-core/via_dmablit.c | 25
-rw-r--r--  linux-core/via_dmablit.h | 10
54 files changed, 6195 insertions(+), 418 deletions(-)
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 32828d28..3aecec43 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -75,8 +75,8 @@ DRM_MODULES ?= $(MODULE_LIST)
# These definitions are for handling dependencies in the out of kernel build.
-DRMSHARED = drm.h drm_sarea.h
-DRMHEADERS = drmP.h drm_compat.h drm_os_linux.h $(DRMSHARED)
+DRMSHARED = drm.h drm_sarea.h drm_drawable.c
+DRMHEADERS = drmP.h drm_compat.h drm_os_linux.h drm.h drm_sarea.h
COREHEADERS = drm_core.h drm_sman.h drm_hashtab.h
TDFXHEADERS = tdfx_drv.h $(DRMHEADERS)
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 211e5b05..fba57ddf 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -12,13 +12,15 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
- drm_hashtab.o drm_mm.o
+ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
+ drm_fence.o drm_ttm.o drm_bo.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i830-objs := i830_drv.o i830_dma.o i830_irq.o
-i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
+i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
+ i915_buffer.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 6cbb810f..d02184c7 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -41,7 +41,6 @@
* can build the DRM (part of PI DRI). 4/21/2000 S + B */
#include <asm/current.h>
#endif /* __alpha__ */
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
@@ -84,6 +83,7 @@
#include <linux/poll.h>
#include <asm/pgalloc.h>
#include "drm.h"
+#include <linux/slab.h>
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@@ -110,6 +110,7 @@
#define DRIVER_IRQ_VBL 0x100
#define DRIVER_DMA_QUEUE 0x200
#define DRIVER_FB_DMA 0x400
+#define DRIVER_IRQ_VBL2 0x800
/*@}*/
@@ -154,9 +155,18 @@
#define DRM_MEM_CTXLIST 21
#define DRM_MEM_MM 22
#define DRM_MEM_HASHTAB 23
+#define DRM_MEM_OBJECTS 24
+#define DRM_MEM_FENCE 25
+#define DRM_MEM_TTM 26
+#define DRM_MEM_BUFOBJ 27
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
#define DRM_MAP_HASH_OFFSET 0x10000000
+#define DRM_MAP_HASH_ORDER 12
+#define DRM_OBJECT_HASH_ORDER 12
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#define DRM_MM_INIT_MAX_PAGES 256
/*@}*/
@@ -298,8 +308,8 @@ typedef struct drm_devstate {
} drm_devstate_t;
typedef struct drm_magic_entry {
- drm_hash_item_t hash_item;
- struct list_head head;
+ drm_hash_item_t hash_item;
+ struct list_head head;
struct drm_file *priv;
} drm_magic_entry_t;
@@ -387,6 +397,19 @@ typedef struct drm_buf_entry {
drm_freelist_t freelist;
} drm_buf_entry_t;
+/*
+ * This should be small enough to allow the use of kmalloc for hash tables
+ * instead of vmalloc.
+ */
+
+#define DRM_FILE_HASH_ORDER 8
+typedef enum{
+ _DRM_REF_USE=0,
+ _DRM_REF_TYPE1,
+ _DRM_NO_REF_TYPES
+} drm_ref_t;
+
+
/** File private data */
typedef struct drm_file {
int authenticated;
@@ -401,6 +424,18 @@ typedef struct drm_file {
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
+
+ /*
+ * The user object hash table is global and resides in the
+ * drm_device structure. We protect the lists and hash tables with the
+ * device struct_mutex. A bit coarse-grained but probably the best
+ * option.
+ */
+
+ struct list_head refd_objects;
+ struct list_head user_objects;
+
+ drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES];
void *driver_priv;
} drm_file_t;
@@ -448,7 +483,8 @@ typedef struct drm_device_dma {
enum {
_DRM_DMA_USE_AGP = 0x01,
_DRM_DMA_USE_SG = 0x02,
- _DRM_DMA_USE_FB = 0x04
+ _DRM_DMA_USE_FB = 0x04,
+ _DRM_DMA_USE_PCI_RO = 0x08
} flags;
} drm_device_dma_t;
@@ -501,14 +537,35 @@ typedef struct drm_sigdata {
drm_hw_lock_t *lock;
} drm_sigdata_t;
+
+/*
+ * Generic memory manager structs
+ */
+
+typedef struct drm_mm_node {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+ int free;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+ void *private;
+} drm_mm_node_t;
+
+typedef struct drm_mm {
+ drm_mm_node_t root_node;
+} drm_mm_t;
+
+
/**
* Mappings list
*/
typedef struct drm_map_list {
struct list_head head; /**< list head */
- drm_hash_item_t hash;
+ drm_hash_item_t hash;
drm_map_t *map; /**< mapping */
- unsigned int user_token;
+ drm_u64_t user_token;
+ drm_mm_node_t *file_offset_node;
} drm_map_list_t;
typedef drm_map_t drm_local_map_t;
@@ -541,22 +598,77 @@ typedef struct ati_pcigart_info {
drm_local_map_t mapping;
} drm_ati_pcigart_info;
-/*
- * Generic memory manager structs
+/*
+ * User space objects and their references.
*/
-typedef struct drm_mm_node {
- struct list_head fl_entry;
- struct list_head ml_entry;
- int free;
- unsigned long start;
- unsigned long size;
- void *private;
-} drm_mm_node_t;
+#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-typedef struct drm_mm {
- drm_mm_node_t root_node;
-} drm_mm_t;
+typedef enum {
+ drm_fence_type,
+ drm_buffer_type,
+ drm_ttm_type
+
+ /*
+ * Add other user space object types here.
+ */
+
+} drm_object_type_t;
+
+
+
+
+/*
+ * A user object is a structure that helps the drm give out user handles
+ * to kernel internal objects and to keep track of these objects so that
+ * they can be destroyed, for example when the user space process exits.
+ * Designed to be accessible using a user space 32-bit handle.
+ */
+
+typedef struct drm_user_object{
+ drm_hash_item_t hash;
+ struct list_head list;
+ drm_object_type_t type;
+ atomic_t refcount;
+ int shareable;
+ drm_file_t *owner;
+ void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj,
+ drm_ref_t ref_action);
+ void (*unref)(drm_file_t *priv, struct drm_user_object *obj,
+ drm_ref_t unref_action);
+ void (*remove)(drm_file_t *priv, struct drm_user_object *obj);
+} drm_user_object_t;
+
+/*
+ * A ref object is a structure which is used to
+ * keep track of references to user objects, so that these references can be
+ * destroyed, for example when the user space
+ * process exits. Designed to be accessible using a pointer to the _user_ object.
+ */
+
+
+typedef struct drm_ref_object {
+ drm_hash_item_t hash;
+ struct list_head list;
+ atomic_t refcount;
+ drm_ref_t unref_action;
+} drm_ref_object_t;
+
+
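
For orientation, a minimal sketch of how this bookkeeping is meant to be used (the drm_foo_export_object helper is hypothetical; drm_add_user_object() and drm_lookup_user_object() are declared further down in this header):

static int drm_foo_export_object(drm_file_t *priv, drm_user_object_t *uo,
                                 int shareable, uint32_t *handle)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);		/* struct_mutex guards the lists and hash tables */
	ret = drm_add_user_object(priv, uo, shareable);
	if (!ret)
		*handle = uo->hash.key;		/* the 32-bit handle handed back to user space */
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

Resolving a handle goes the other way: call drm_lookup_user_object(priv, handle) under the same mutex; drm_lookup_buffer_object() in drm_bo.c shows a caller that also bumps the enclosing object's usage count before the mutex is dropped.
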
+#include "drm_ttm.h"
+
+/*
+ * buffer object driver
+ */
+
+typedef struct drm_bo_driver{
+ int cached[DRM_BO_MEM_TYPES];
+ drm_local_map_t *iomap[DRM_BO_MEM_TYPES];
+ drm_ttm_backend_t *(*create_ttm_backend_entry)
+ (struct drm_device *dev);
+ int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
+ int (*invalidate_caches)(struct drm_device *dev, uint32_t flags);
+} drm_bo_driver_t;
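
For orientation only, a hedged sketch of how a driver might fill in this hook table; the foo_* names and values are illustrative, not taken from this merge (the actual i915 hook-up lives in i915_buffer.c / i915_drv.c, whose hunks are not shown in this excerpt):

static drm_ttm_backend_t *foo_create_ttm_backend_entry(struct drm_device *dev)
{
	return drm_agp_init_ttm(dev, NULL);	/* AGP-backed ttm; see drm_agpsupport.c */
}

static int foo_fence_type(uint32_t flags, uint32_t *class, uint32_t *type)
{
	*class = 0;			/* only fence class 0 is implemented so far */
	*type = DRM_FENCE_TYPE_EXE;	/* wait for command execution only */
	return 0;
}

static int foo_invalidate_caches(struct drm_device *dev, uint32_t flags)
{
	return 0;			/* nothing to flush in this sketch */
}

static drm_bo_driver_t foo_bo_driver = {
	.cached = {1, 1},		/* cached bindings allowed for LOCAL and TT */
	.iomap = {NULL, NULL},		/* no fixed iomaps for these memory types */
	.create_ttm_backend_entry = foo_create_ttm_backend_entry,
	.fence_type = foo_fence_type,
	.invalidate_caches = foo_invalidate_caches,
};

The driver then points the bo_driver member of its struct drm_driver (added below) at this table.
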
/**
@@ -564,6 +676,7 @@ typedef struct drm_mm {
* a family of cards. There will one drm_device for each card present
* in this family
*/
+
struct drm_device;
struct drm_driver {
int (*load) (struct drm_device *, unsigned long flags);
@@ -582,6 +695,7 @@ struct drm_driver {
int new);
void (*kernel_context_switch_unlock) (struct drm_device * dev);
int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
+ int (*vblank_wait2) (struct drm_device * dev, unsigned int *sequence);
int (*dri_library_name) (struct drm_device * dev, char * buf);
/**
@@ -609,6 +723,9 @@ struct drm_driver {
unsigned long (*get_reg_ofs) (struct drm_device * dev);
void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
+ struct drm_fence_driver *fence_driver;
+ struct drm_bo_driver *bo_driver;
+
int major;
int minor;
int patchlevel;
@@ -638,6 +755,71 @@ typedef struct drm_head {
struct class_device *dev_class;
} drm_head_t;
+typedef struct drm_cache {
+
+ /*
+ * Memory caches
+ */
+
+ kmem_cache_t *mm;
+ kmem_cache_t *fence_object;
+} drm_cache_t;
+
+
+
+typedef struct drm_fence_driver{
+ int no_types;
+ uint32_t wrap_diff;
+ uint32_t flush_diff;
+ uint32_t sequence_mask;
+ int lazy_capable;
+ int (*emit) (struct drm_device *dev, uint32_t flags,
+ uint32_t *breadcrumb,
+ uint32_t *native_type);
+ void (*poke_flush) (struct drm_device *dev);
+} drm_fence_driver_t;
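
To make the hook semantics concrete, a hedged sketch of a fence driver; the field values are illustrative and the foo_* names are made up, not taken from this patch:

static int foo_fence_emit(struct drm_device *dev, uint32_t flags,
			  uint32_t *breadcrumb, uint32_t *native_type)
{
	static uint32_t sequence;		/* illustrative only */

	*breadcrumb = ++sequence;		/* later reported back via drm_fence_handler() */
	*native_type = DRM_FENCE_TYPE_EXE;	/* what the hardware actually signals */
	/* A real driver would emit the sequence into its command ring here. */
	return 0;
}

static void foo_poke_flush(struct drm_device *dev)
{
	/* Prod the hardware / IRQ path so pending fences get reported. */
}

static drm_fence_driver_t foo_fence_driver = {
	.no_types = 1,			/* number of fence types supported */
	.wrap_diff = (1U << 30),	/* sequence distance treated as a wrap */
	.flush_diff = (1U << 29),	/* age at which old fences are flushed */
	.sequence_mask = 0xffffffffU,	/* valid bits of the breadcrumb */
	.lazy_capable = 1,		/* can wait on an IRQ instead of polling */
	.emit = foo_fence_emit,
	.poke_flush = foo_poke_flush,
};
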
+
+#define _DRM_FENCE_TYPE_EXE 0x00
+
+typedef struct drm_fence_manager{
+ int initialized;
+ rwlock_t lock;
+
+ /*
+ * The list below should be maintained in sequence order and
+ * access is protected by the above spinlock.
+ */
+
+ struct list_head ring;
+ struct list_head *fence_types[32];
+ volatile uint32_t pending_flush;
+ wait_queue_head_t fence_queue;
+ int pending_exe_flush;
+ uint32_t last_exe_flush;
+ uint32_t exe_flush_sequence;
+ atomic_t count;
+} drm_fence_manager_t;
+
+typedef struct drm_buffer_manager{
+ struct mutex init_mutex;
+ int nice_mode;
+ int initialized;
+ drm_file_t *last_to_validate;
+ int has_type[DRM_BO_MEM_TYPES];
+ int use_type[DRM_BO_MEM_TYPES];
+ drm_mm_t manager[DRM_BO_MEM_TYPES];
+ struct list_head lru[DRM_BO_MEM_TYPES];
+ struct list_head pinned[DRM_BO_MEM_TYPES];
+ struct list_head unfenced;
+ struct list_head ddestroy;
+ struct work_struct wq;
+ uint32_t fence_type;
+ unsigned long cur_pages;
+ atomic_t count;
+} drm_buffer_manager_t;
+
+
+
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
@@ -676,8 +858,8 @@ typedef struct drm_device {
/*@{ */
drm_file_t *file_first; /**< file list head */
drm_file_t *file_last; /**< file list tail */
- drm_open_hash_t magiclist;
- struct list_head magicfree;
+ drm_open_hash_t magiclist;
+ struct list_head magicfree;
/*@} */
/** \name Memory management */
@@ -685,6 +867,10 @@ typedef struct drm_device {
drm_map_list_t *maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
drm_open_hash_t map_hash; /**< User token hash table for maps */
+ drm_mm_t offset_manager; /**< User token manager */
+ drm_open_hash_t object_hash; /**< User token hash table for objects */
+ struct address_space *dev_mapping; /**< For unmap_mapping_range() */
+ struct page *ttm_dummy_page;
/** \name Context handle management */
/*@{ */
@@ -732,9 +918,13 @@ typedef struct drm_device {
wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
atomic_t vbl_received;
+ atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
spinlock_t vbl_lock;
drm_vbl_sig_t vbl_sigs; /**< signal list to send on VBLANK */
+ drm_vbl_sig_t vbl_sigs2; /**< signals to send on secondary VBLANK */
unsigned int vbl_pending;
+ spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
+ void (*locked_tasklet_func)(struct drm_device *dev);
/*@} */
cycles_t ctx_start;
@@ -747,10 +937,8 @@ typedef struct drm_device {
drm_agp_head_t *agp; /**< AGP data */
struct pci_dev *pdev; /**< PCI device structure */
- int pci_domain; /**< PCI bus domain number */
- int pci_bus; /**< PCI bus number */
- int pci_slot; /**< PCI slot number */
- int pci_func; /**< PCI function number */
+ int pci_vendor; /**< PCI vendor id */
+ int pci_device; /**< PCI device id */
#ifdef __alpha__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
struct pci_controler *hose;
@@ -768,14 +956,101 @@ typedef struct drm_device {
drm_local_map_t *agp_buffer_map;
unsigned int agp_buffer_token;
drm_head_t primary; /**< primary screen head */
+
+ drm_fence_manager_t fm;
+ drm_buffer_manager_t bm;
+
+ /** \name Drawable information */
+ /*@{ */
+ spinlock_t drw_lock;
+ unsigned int drw_bitfield_length;
+ u32 *drw_bitfield;
+ unsigned int drw_info_length;
+ drm_drawable_info_t **drw_info;
+ /*@} */
} drm_device_t;
+#if __OS_HAS_AGP
+typedef struct drm_agp_ttm_priv {
+ DRM_AGP_MEM *mem;
+ struct agp_bridge_data *bridge;
+ unsigned alloc_type;
+ unsigned cached_type;
+ unsigned uncached_type;
+ int populated;
+} drm_agp_ttm_priv;
+#endif
+
+typedef struct drm_fence_object{
+ drm_user_object_t base;
+ atomic_t usage;
+
+ /*
+ * The below three fields are protected by the fence manager spinlock.
+ */
+
+ struct list_head ring;
+ int class;
+ uint32_t native_type;
+ uint32_t type;
+ uint32_t signaled;
+ uint32_t sequence;
+ uint32_t flush_mask;
+ uint32_t submitted_flush;
+} drm_fence_object_t;
+
+
+typedef struct drm_buffer_object{
+ drm_device_t *dev;
+ drm_user_object_t base;
+
+ /*
+ * If there is a possibility that the usage variable is zero,
+ * then dev->struct_mutex should be locked before incrementing it.
+ */
+
+ atomic_t usage;
+ drm_ttm_object_t *ttm_object;
+ drm_ttm_t *ttm;
+ unsigned long num_pages;
+ unsigned long buffer_start;
+ drm_bo_type_t type;
+ unsigned long offset;
+ uint32_t page_alignment;
+ atomic_t mapped;
+ uint32_t flags;
+ uint32_t mask;
+
+ drm_mm_node_t *node_ttm; /* MM node for the ttm (TT aperture) */
+ drm_mm_node_t *node_card; /* MM node for on-card RAM */
+ struct list_head lru_ttm; /* LRU for the ttm pages*/
+ struct list_head lru_card; /* For memory types with on-card RAM */
+ struct list_head ddestroy;
+
+ uint32_t fence_type;
+ uint32_t fence_class;
+ drm_fence_object_t *fence;
+ uint32_t priv_flags;
+ wait_queue_head_t event_queue;
+ struct mutex mutex;
+} drm_buffer_object_t;
+
+#define _DRM_BO_FLAG_UNFENCED 0x00000001
+#define _DRM_BO_FLAG_EVICTED 0x00000002
+
+
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
return ((dev->driver->driver_features & feature) ? 1 : 0);
}
+#ifdef __alpha__
+#define drm_get_pci_domain(dev) dev->hose->bus->number
+#else
+#define drm_get_pci_domain(dev) 0
+#endif
+
#if __OS_HAS_AGP
static inline int drm_core_has_AGP(struct drm_device *dev)
{
@@ -806,9 +1081,22 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
}
#else
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+ unsigned int flags)
+{
+ return -ENODEV;
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+ unsigned long size, unsigned int flags)
+{
+ return -ENODEV;
+}
+
#define drm_core_has_MTRR(dev) (0)
#endif
+
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
@@ -837,6 +1125,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
+extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
/* Memory management support (drm_memory.h) */
#include "drm_memory.h"
@@ -852,6 +1141,14 @@ extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
+extern void drm_free_memctl(size_t size);
+extern int drm_alloc_memctl(size_t size);
+extern void drm_query_memctl(drm_u64_t *cur_used,
+ drm_u64_t *low_threshold,
+ drm_u64_t *high_threshold);
+extern void drm_init_memctl(size_t low_threshold,
+ size_t high_threshold);
+
/* Misc. IOCTL support (drm_ioctl.h) */
extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@@ -900,6 +1197,10 @@ extern int drm_adddraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_rmdraw(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
+extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg);
+extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
+ drm_drawable_t id);
/* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct inode *inode, struct file *filp,
@@ -915,6 +1216,13 @@ extern int drm_unlock(struct inode *inode, struct file *filp,
extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);
extern int drm_lock_free(drm_device_t * dev,
__volatile__ unsigned int *lock, unsigned int context);
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */
+
+extern int drm_i_have_hw_lock(struct file *filp);
+extern int drm_kernel_take_hw_lock(struct file *filp);
/* Buffer management support (drm_bufs.h) */
extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
@@ -964,6 +1272,7 @@ extern int drm_wait_vblank(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq);
extern void drm_vbl_send_signals(drm_device_t * dev);
+extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*));
/* AGP/GART support (drm_agpsupport.h) */
extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
@@ -999,7 +1308,8 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
-
+extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
+ drm_ttm_backend_t *backend);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
@@ -1008,6 +1318,7 @@ extern int drm_put_head(drm_head_t * head);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern unsigned int drm_cards_limit;
extern drm_head_t **drm_heads;
+extern drm_cache_t drm_cache;
extern struct drm_sysfs_class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
@@ -1051,11 +1362,121 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev);
extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size,
unsigned alignment);
-extern void drm_mm_put_block(drm_mm_t *mm, drm_mm_node_t *cur);
+extern void drm_mm_put_block(drm_mm_node_t *cur);
extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
unsigned alignment, int best_match);
extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
extern void drm_mm_takedown(drm_mm_t *mm);
+extern int drm_mm_clean(drm_mm_t *mm);
+extern unsigned long drm_mm_tail_space(drm_mm_t *mm);
+extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size);
+extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size);
+
+static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
+{
+ return block->mm;
+}
+
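
A hedged sketch of the allocation pattern these helpers support; the foo_* name is made up and the unit (pages in the buffer-object code) is whatever the caller manages. drm_bo_alloc_space() in drm_bo.c below follows the same search/get/put sequence:

static int foo_mm_example(void)
{
	drm_mm_t mm;
	drm_mm_node_t *node;
	int ret;

	ret = drm_mm_init(&mm, 0, 1024);		/* manage the range [0, 1024) */
	if (ret)
		return ret;

	node = drm_mm_search_free(&mm, 16, 0, 0);	/* find room for 16 units */
	if (node)
		node = drm_mm_get_block(node, 16, 0);	/* carve them out of the hole */

	if (node)
		drm_mm_put_block(node);			/* give the range back */

	drm_mm_takedown(&mm);				/* the manager must be empty here */
	return 0;
}
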
+
+/*
+ * User space object bookkeeping (drm_object.c)
+ */
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,
+                               int shareable);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key);
+
+/*
+ * Must be called with the struct_mutex held.
+ * If "item" has been obtained by a call to drm_lookup_user_object. You may not
+ * release the struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item);
+
+/*
+ * Must be called with the struct_mutex held. May temporarily release it.
+ */
+
+extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object,
+ drm_ref_t ref_action);
+
+/*
+ * Must be called with the struct_mutex held.
+ */
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv,
+ drm_user_object_t *referenced_object,
+ drm_ref_t ref_action);
+/*
+ * Must be called with the struct_mutex held.
+ * If "item" has been obtained by a call to drm_lookup_ref_object. You may not
+ * release the struct_mutex before calling drm_remove_ref_object.
+ * This function may temporarily release the struct_mutex.
+ */
+
+extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item);
+extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type,
+ drm_user_object_t **object);
+extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type);
+
+
+
+/*
+ * fence objects (drm_fence.c)
+ */
+
+extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type);
+extern void drm_fence_manager_init(drm_device_t *dev);
+extern void drm_fence_manager_takedown(drm_device_t *dev);
+extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence);
+extern int drm_fence_object_flush(drm_device_t * dev,
+ volatile drm_fence_object_t * fence,
+ uint32_t type);
+extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
+ uint32_t type);
+extern void drm_fence_usage_deref_locked(drm_device_t * dev,
+ drm_fence_object_t * fence);
+extern void drm_fence_usage_deref_unlocked(drm_device_t * dev,
+ drm_fence_object_t * fence);
+extern int drm_fence_object_wait(drm_device_t * dev,
+ volatile drm_fence_object_t * fence,
+ int lazy, int ignore_signals, uint32_t mask);
+extern int drm_fence_object_create(drm_device_t *dev, uint32_t type,
+ uint32_t fence_flags,
+ drm_fence_object_t **c_fence);
+extern int drm_fence_add_user_object(drm_file_t *priv,
+ drm_fence_object_t *fence,
+ int shareable);
+
+
+
+
+
+extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
+
+/*
+ * buffer objects (drm_bo.c)
+ */
+
+extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
+extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_driver_finish(drm_device_t *dev);
+extern int drm_bo_driver_init(drm_device_t *dev);
+extern int drm_fence_buffer_objects(drm_file_t * priv,
+ struct list_head *list,
+ uint32_t fence_flags,
+ drm_fence_object_t *fence,
+ drm_fence_object_t **used_fence);
/* Inline replacements for DRM_IOREMAP macros */
@@ -1127,6 +1548,58 @@ extern void *drm_alloc(size_t size, int area);
extern void drm_free(void *pt, size_t size, int area);
#endif
+/*
+ * Accounting variants of standard calls.
+ */
+
+static inline void *drm_ctl_alloc(size_t size, int area)
+{
+ void *ret;
+ if (drm_alloc_memctl(size))
+ return NULL;
+ ret = drm_alloc(size, area);
+ if (!ret)
+ drm_free_memctl(size);
+ return ret;
+}
+
+static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
+{
+ void *ret;
+
+ if (drm_alloc_memctl(nmemb*size))
+ return NULL;
+ ret = drm_calloc(nmemb, size, area);
+ if (!ret)
+ drm_free_memctl(nmemb*size);
+ return ret;
+}
+
+static inline void drm_ctl_free(void *pt, size_t size, int area)
+{
+ drm_free(pt, size, area);
+ drm_free_memctl(size);
+}
+
+static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size,
+ int flags)
+{
+ void *ret;
+ if (drm_alloc_memctl(size))
+ return NULL;
+ ret = kmem_cache_alloc(cache, flags);
+ if (!ret)
+ drm_free_memctl(size);
+ return ret;
+}
+
+static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size,
+ void *obj)
+{
+ kmem_cache_free(cache, obj);
+ drm_free_memctl(size);
+}
+
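
A short usage note (hedged sketch, the foo_* names are made up): the accounting only stays balanced if every drm_ctl_alloc() is paired with a drm_ctl_free() of the same size, because drm_free_memctl() credits back exactly the size the caller passes:

static int foo_ctl_alloc_example(void)
{
	struct foo_obj { int dummy; } *obj;

	obj = drm_ctl_alloc(sizeof(*obj), DRM_MEM_BUFOBJ);
	if (!obj)
		return -ENOMEM;		/* over the memctl threshold, or a real OOM */

	/* ... use obj ... */

	drm_ctl_free(obj, sizeof(*obj), DRM_MEM_BUFOBJ);	/* same size as the alloc */
	return 0;
}
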
/*@}*/
#endif /* __KERNEL__ */
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index dce27cdf..a5f1f9ee 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -552,4 +552,162 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
+
+
+/*
+ * AGP ttm backend interface.
+ */
+
+#ifndef AGP_USER_TYPES
+#define AGP_USER_TYPES (1 << 16)
+#define AGP_USER_MEMORY (AGP_USER_TYPES)
+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
+#endif
+
+static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+
+static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
+ struct page **pages) {
+
+ drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+ struct page **cur_page, **last_page = pages + num_pages;
+ DRM_AGP_MEM *mem;
+
+ if (drm_alloc_memctl(num_pages * sizeof(void *)))
+ return -1;
+
+ DRM_DEBUG("drm_agp_populate_ttm\n");
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+ mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type);
+#else
+ mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type);
+#endif
+ if (!mem) {
+ drm_free_memctl(num_pages *sizeof(void *));
+ return -1;
+ }
+
+ DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
+ mem->page_count = 0;
+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
+ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
+ }
+ agp_priv->mem = mem;
+ return 0;
+}
+
+static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
+ unsigned long offset,
+ int cached)
+{
+ drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+ DRM_AGP_MEM *mem = agp_priv->mem;
+ int ret;
+
+ DRM_DEBUG("drm_agp_bind_ttm\n");
+ DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED,
+ (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0);
+ mem->is_flushed = TRUE;
+ mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type;
+ ret = drm_agp_bind_memory(mem, offset);
+ if (ret) {
+ DRM_ERROR("AGP Bind memory failed\n");
+ }
+ return ret;
+}
+
+static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
+
+ drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+
+ DRM_DEBUG("drm_agp_unbind_ttm\n");
+ if (agp_priv->mem->is_bound)
+ return drm_agp_unbind_memory(agp_priv->mem);
+ else
+ return 0;
+}
+
+static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
+
+ drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
+ DRM_AGP_MEM *mem = agp_priv->mem;
+
+ DRM_DEBUG("drm_agp_clear_ttm\n");
+ if (mem) {
+ unsigned long num_pages = mem->page_count;
+ backend->unbind(backend);
+ agp_free_memory(mem);
+ drm_free_memctl(num_pages *sizeof(void *));
+ }
+
+ agp_priv->mem = NULL;
+}
+
+static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
+
+ drm_agp_ttm_priv *agp_priv;
+
+ if (backend) {
+ DRM_DEBUG("drm_agp_destroy_ttm\n");
+ agp_priv = (drm_agp_ttm_priv *) backend->private;
+ if (agp_priv) {
+ if (agp_priv->mem) {
+ backend->clear(backend);
+ }
+ drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+ backend->private = NULL;
+ }
+ if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) {
+ drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
+ }
+ }
+}
+
+
+drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev,
+ drm_ttm_backend_t *backend)
+{
+
+ drm_ttm_backend_t *agp_be;
+ drm_agp_ttm_priv *agp_priv;
+
+ agp_be = (backend != NULL) ? backend:
+ drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+
+ if (!agp_be)
+ return NULL;
+
+ agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+
+ if (!agp_priv) {
+ drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+ return NULL;
+ }
+
+ agp_priv->mem = NULL;
+ agp_priv->alloc_type = AGP_USER_MEMORY;
+ agp_priv->cached_type = AGP_USER_CACHED_MEMORY;
+ agp_priv->uncached_type = AGP_USER_MEMORY;
+ agp_priv->bridge = dev->agp->bridge;
+ agp_priv->populated = FALSE;
+ agp_be->aperture_base = dev->agp->agp_info.aper_base;
+ agp_be->private = (void *) agp_priv;
+ agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust;
+ agp_be->populate = drm_agp_populate;
+ agp_be->clear = drm_agp_clear_ttm;
+ agp_be->bind = drm_agp_bind_ttm;
+ agp_be->unbind = drm_agp_unbind_ttm;
+ agp_be->destroy = drm_agp_destroy_ttm;
+ DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE,
+ (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0);
+ DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA,
+ (dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0);
+ agp_be->drm_map_type = _DRM_AGP;
+ return agp_be;
+}
+EXPORT_SYMBOL(drm_agp_init_ttm);
+
#endif /* __OS_HAS_AGP */
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
new file mode 100644
index 00000000..65e24fb6
--- /dev/null
+++ b/linux-core/drm_bo.c
@@ -0,0 +1,1998 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+/*
+ * Buffer object locking policy:
+ * Lock dev->struct_mutex;
+ * Increase usage
+ * Unlock dev->struct_mutex;
+ * Lock buffer->mutex;
+ * Do whatever you want;
+ * Unlock buffer->mutex;
+ * Decrease usage. Call destruction if zero.
+ *
+ * User object visibility ups usage just once, since it has its own
+ * refcounting.
+ *
+ * Destruction:
+ * lock dev->struct_mutex;
+ * Verify that usage is zero. Otherwise unlock and continue.
+ * Destroy object.
+ * unlock dev->struct_mutex;
+ *
+ * Mutex and spinlock locking orders:
+ * 1.) Buffer mutex
+ * 2.) Refer to ttm locking orders.
+ */
+
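
A hedged sketch of the policy above, mirroring what the ioctl helpers later in this file do (the foo_* name and the handle argument are illustrative):

static int foo_touch_buffer(drm_file_t *priv, uint32_t handle)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_object_t *bo;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);	/* bumps bo->usage */
	mutex_unlock(&dev->struct_mutex);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	/* ... operate on the buffer ... */
	mutex_unlock(&bo->mutex);

	drm_bo_usage_deref_unlocked(dev, bo);	/* destroys the bo once usage reaches zero */
	return 0;
}
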
+#define DRM_FLAG_MASKED(_old, _new, _mask) {\
+(_old) ^= (((_old) ^ (_new)) & (_mask)); \
+}
+
+static inline uint32_t drm_bo_type_flags(unsigned type)
+{
+ return (1 << (24 + type));
+}
+
+static inline drm_buffer_object_t *drm_bo_entry(struct list_head *list,
+ unsigned type)
+{
+ switch (type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+ return list_entry(list, drm_buffer_object_t, lru_ttm);
+ case DRM_BO_MEM_VRAM:
+ case DRM_BO_MEM_VRAM_NM:
+ return list_entry(list, drm_buffer_object_t, lru_card);
+ default:
+ BUG_ON(1);
+ }
+ return NULL;
+}
+
+static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t * bo,
+ unsigned type)
+{
+ switch (type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+ return bo->node_ttm;
+ case DRM_BO_MEM_VRAM:
+ case DRM_BO_MEM_VRAM_NM:
+ return bo->node_card;
+ default:
+ BUG_ON(1);
+ }
+ return NULL;
+}
+
+/*
+ * bo locked. dev->struct_mutex locked.
+ */
+
+static void drm_bo_add_to_lru(drm_buffer_object_t * buf,
+ drm_buffer_manager_t * bm)
+{
+ struct list_head *list;
+ unsigned mem_type;
+
+ if (buf->flags & DRM_BO_FLAG_MEM_TT) {
+ mem_type = DRM_BO_MEM_TT;
+ list =
+ (buf->
+ flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+ &bm->pinned[mem_type] : &bm->lru[mem_type];
+ list_add_tail(&buf->lru_ttm, list);
+ } else {
+ mem_type = DRM_BO_MEM_LOCAL;
+ list =
+ (buf->
+ flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+ &bm->pinned[mem_type] : &bm->lru[mem_type];
+ list_add_tail(&buf->lru_ttm, list);
+ }
+ if (buf->flags & DRM_BO_FLAG_MEM_VRAM) {
+ mem_type = DRM_BO_MEM_VRAM;
+ list =
+ (buf->
+ flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+ &bm->pinned[mem_type] : &bm->lru[mem_type];
+ list_add_tail(&buf->lru_card, list);
+ }
+}
+
+/*
+ * bo locked.
+ */
+
+static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict,
+ int force_no_move)
+{
+ drm_device_t *dev = buf->dev;
+ int ret;
+
+ if (buf->node_ttm) {
+ mutex_lock(&dev->struct_mutex);
+ if (evict)
+ ret = drm_evict_ttm(buf->ttm);
+ else
+ ret = drm_unbind_ttm(buf->ttm);
+
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ if (ret == -EAGAIN)
+ schedule();
+ return ret;
+ }
+
+ if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
+ drm_mm_put_block(buf->node_ttm);
+ buf->node_ttm = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ buf->flags &= ~DRM_BO_FLAG_MEM_TT;
+ buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+
+ return 0;
+}
+
+/*
+ * Lock dev->struct_mutex
+ */
+
+static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
+{
+
+ drm_buffer_manager_t *bm = &dev->bm;
+
+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+
+ /*
+ * Someone might try to access us through the still active BM lists.
+ */
+
+ if (atomic_read(&bo->usage) != 0)
+ return;
+ if (!list_empty(&bo->ddestroy))
+ return;
+
+ if (bo->fence) {
+ if (!drm_fence_object_signaled(bo->fence, bo->fence_type)) {
+
+ drm_fence_object_flush(dev, bo->fence, bo->fence_type);
+ list_add_tail(&bo->ddestroy, &bm->ddestroy);
+ schedule_delayed_work(&bm->wq,
+ ((DRM_HZ / 100) <
+ 1) ? 1 : DRM_HZ / 100);
+ return;
+ } else {
+ drm_fence_usage_deref_locked(dev, bo->fence);
+ bo->fence = NULL;
+ }
+ }
+ /*
+ * Take away from lru lists.
+ */
+
+ list_del_init(&bo->lru_ttm);
+ list_del_init(&bo->lru_card);
+
+ if (bo->ttm) {
+ unsigned long _end = jiffies + DRM_HZ;
+ int ret;
+
+ /*
+ * This temporarily unlocks struct_mutex.
+ */
+
+ do {
+ ret = drm_unbind_ttm(bo->ttm);
+ if (ret == -EAGAIN) {
+ mutex_unlock(&dev->struct_mutex);
+ schedule();
+ mutex_lock(&dev->struct_mutex);
+ }
+ } while (ret == -EAGAIN && !time_after_eq(jiffies, _end));
+
+ if (ret) {
+ DRM_ERROR("Couldn't unbind buffer. "
+ "Bad. Continuing anyway\n");
+ }
+ }
+
+ if (bo->node_ttm) {
+ drm_mm_put_block(bo->node_ttm);
+ bo->node_ttm = NULL;
+ }
+ if (bo->node_card) {
+ drm_mm_put_block(bo->node_card);
+ bo->node_card = NULL;
+ }
+ if (bo->ttm_object) {
+ drm_ttm_object_deref_locked(dev, bo->ttm_object);
+ }
+ atomic_dec(&bm->count);
+ drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
+}
+
+/*
+ * Call bo->mutex locked.
+ * Wait until the buffer is idle.
+ */
+
+static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
+ int no_wait)
+{
+
+ drm_fence_object_t *fence = bo->fence;
+ int ret;
+
+ if (fence) {
+ drm_device_t *dev = bo->dev;
+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ bo->fence = NULL;
+ return 0;
+ }
+ if (no_wait) {
+ return -EBUSY;
+ }
+ ret =
+ drm_fence_object_wait(dev, fence, lazy, ignore_signals,
+ bo->fence_type);
+ if (ret)
+ return ret;
+
+ drm_fence_usage_deref_unlocked(dev, fence);
+ bo->fence = NULL;
+
+ }
+ return 0;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ */
+
+static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+
+ drm_buffer_object_t *entry, *nentry;
+ struct list_head *list, *next;
+ drm_fence_object_t *fence;
+
+ list_for_each_safe(list, next, &bm->ddestroy) {
+ entry = list_entry(list, drm_buffer_object_t, ddestroy);
+ atomic_inc(&entry->usage);
+ if (atomic_read(&entry->usage) != 1) {
+ atomic_dec(&entry->usage);
+ continue;
+ }
+
+ nentry = NULL;
+ if (next != &bm->ddestroy) {
+ nentry = list_entry(next, drm_buffer_object_t,
+ ddestroy);
+ atomic_inc(&nentry->usage);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&entry->mutex);
+ fence = entry->fence;
+ if (fence && drm_fence_object_signaled(fence,
+ entry->fence_type)) {
+ drm_fence_usage_deref_locked(dev, fence);
+ entry->fence = NULL;
+ }
+
+ if (entry->fence && remove_all) {
+ if (bm->nice_mode) {
+ unsigned long _end = jiffies + 3 * DRM_HZ;
+ int ret;
+ do {
+ ret = drm_bo_wait(entry, 0, 1, 0);
+ } while (ret && !time_after_eq(jiffies, _end));
+
+ if (entry->fence) {
+ bm->nice_mode = 0;
+ DRM_ERROR("Detected GPU lockup or "
+ "fence driver was taken down. "
+ "Evicting waiting buffers.\n");
+ }
+ }
+ if (entry->fence) {
+ drm_fence_usage_deref_unlocked(dev,
+ entry->fence);
+ entry->fence = NULL;
+ }
+ }
+ mutex_lock(&dev->struct_mutex);
+ mutex_unlock(&entry->mutex);
+ if (atomic_dec_and_test(&entry->usage) && (!entry->fence)) {
+ list_del_init(&entry->ddestroy);
+ drm_bo_destroy_locked(dev, entry);
+ }
+ if (nentry) {
+ atomic_dec(&nentry->usage);
+ }
+ }
+
+}
+
+static void drm_bo_delayed_workqueue(void *data)
+{
+ drm_device_t *dev = (drm_device_t *) data;
+ drm_buffer_manager_t *bm = &dev->bm;
+
+ DRM_DEBUG("Delayed delete Worker\n");
+
+ mutex_lock(&dev->struct_mutex);
+ if (!bm->initialized) {
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+ drm_bo_delayed_delete(dev, 0);
+ if (bm->initialized && !list_empty(&bm->ddestroy)) {
+ schedule_delayed_work(&bm->wq,
+ ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+void drm_bo_usage_deref_locked(drm_device_t * dev, drm_buffer_object_t * bo)
+{
+ if (atomic_dec_and_test(&bo->usage)) {
+ drm_bo_destroy_locked(dev, bo);
+ }
+}
+
+static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
+{
+ drm_bo_usage_deref_locked(priv->head->dev,
+ drm_user_object_entry(uo, drm_buffer_object_t,
+ base));
+}
+
+void drm_bo_usage_deref_unlocked(drm_device_t * dev, drm_buffer_object_t * bo)
+{
+ if (atomic_dec_and_test(&bo->usage)) {
+ mutex_lock(&dev->struct_mutex);
+ if (atomic_read(&bo->usage) == 0)
+ drm_bo_destroy_locked(dev, bo);
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+/*
+ * Note. The caller has to register (if applicable)
+ * and deregister fence object usage.
+ */
+
+int drm_fence_buffer_objects(drm_file_t * priv,
+ struct list_head *list,
+ uint32_t fence_flags,
+ drm_fence_object_t * fence,
+ drm_fence_object_t ** used_fence)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_buffer_manager_t *bm = &dev->bm;
+
+ drm_buffer_object_t *entry;
+ uint32_t fence_type = 0;
+ int count = 0;
+ int ret = 0;
+ struct list_head f_list, *l;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (!list)
+ list = &bm->unfenced;
+
+ list_for_each_entry(entry, list, lru_ttm) {
+ BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+ fence_type |= entry->fence_type;
+ if (entry->fence_class != 0) {
+ DRM_ERROR("Fence class %d is not implemented yet.\n",
+ entry->fence_class);
+ ret = -EINVAL;
+ goto out;
+ }
+ count++;
+ }
+
+ if (!count) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Transfer to a local list before we release the dev->struct_mutex;
+ * This is so we don't get any new unfenced objects while fencing
+ * the ones we already have..
+ */
+
+ list_add_tail(&f_list, list);
+ list_del_init(list);
+
+ if (fence) {
+ if ((fence_type & fence->type) != fence_type) {
+ DRM_ERROR("Given fence doesn't match buffers "
+ "on unfenced list.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ mutex_unlock(&dev->struct_mutex);
+ ret = drm_fence_object_create(dev, fence_type,
+ fence_flags | DRM_FENCE_FLAG_EMIT,
+ &fence);
+ mutex_lock(&dev->struct_mutex);
+ if (ret)
+ goto out;
+ }
+
+ count = 0;
+ l = f_list.next;
+ while (l != &f_list) {
+ entry = list_entry(l, drm_buffer_object_t, lru_ttm);
+ atomic_inc(&entry->usage);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&entry->mutex);
+ mutex_lock(&dev->struct_mutex);
+ list_del_init(l);
+ list_del_init(&entry->lru_card);
+ if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+ count++;
+ if (entry->fence)
+ drm_fence_usage_deref_locked(dev, entry->fence);
+ entry->fence = fence;
+ DRM_FLAG_MASKED(entry->priv_flags, 0,
+ _DRM_BO_FLAG_UNFENCED);
+ DRM_WAKEUP(&entry->event_queue);
+ drm_bo_add_to_lru(entry, bm);
+ }
+ mutex_unlock(&entry->mutex);
+ drm_bo_usage_deref_locked(dev, entry);
+ l = f_list.next;
+ }
+ atomic_add(count, &fence->usage);
+ DRM_DEBUG("Fenced %d buffers\n", count);
+ out:
+ mutex_unlock(&dev->struct_mutex);
+ *used_fence = fence;
+ return ret;
+}
+
+EXPORT_SYMBOL(drm_fence_buffer_objects);
+
+/*
+ * bo->mutex locked
+ */
+
+static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
+ int no_wait, int force_no_move)
+{
+ int ret = 0;
+ drm_device_t *dev = bo->dev;
+ drm_buffer_manager_t *bm = &dev->bm;
+
+ /*
+ * Someone might have modified the buffer before we took the buffer mutex.
+ */
+
+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
+ goto out;
+ if (!(bo->flags & drm_bo_type_flags(mem_type)))
+ goto out;
+
+ ret = drm_bo_wait(bo, 0, 0, no_wait);
+
+ if (ret) {
+ if (ret != -EAGAIN)
+ DRM_ERROR("Failed to expire fence before "
+ "buffer eviction.\n");
+ goto out;
+ }
+
+ if (mem_type == DRM_BO_MEM_TT) {
+ ret = drm_move_tt_to_local(bo, 1, force_no_move);
+ if (ret)
+ goto out;
+ mutex_lock(&dev->struct_mutex);
+ list_del_init(&bo->lru_ttm);
+ drm_bo_add_to_lru(bo, bm);
+ mutex_unlock(&dev->struct_mutex);
+ }
+#if 0
+ else {
+ ret = drm_move_vram_to_local(bo);
+ mutex_lock(&dev->struct_mutex);
+ list_del_init(&bo->lru_card);
+ mutex_unlock(&dev->struct_mutex);
+ }
+#endif
+ if (ret)
+ goto out;
+
+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
+ _DRM_BO_FLAG_EVICTED);
+ out:
+ return ret;
+}
+
+/*
+ * buf->mutex locked.
+ */
+
+int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type,
+ int no_wait)
+{
+ drm_device_t *dev = buf->dev;
+ drm_mm_node_t *node;
+ drm_buffer_manager_t *bm = &dev->bm;
+ drm_buffer_object_t *bo;
+ drm_mm_t *mm = &bm->manager[mem_type];
+ struct list_head *lru;
+ unsigned long size = buf->num_pages;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ do {
+ node = drm_mm_search_free(mm, size, buf->page_alignment, 1);
+ if (node)
+ break;
+
+ lru = &bm->lru[mem_type];
+ if (lru->next == lru)
+ break;
+
+ bo = drm_bo_entry(lru->next, mem_type);
+
+ atomic_inc(&bo->usage);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&bo->mutex);
+ BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
+ ret = drm_bo_evict(bo, mem_type, no_wait, 0);
+ mutex_unlock(&bo->mutex);
+ drm_bo_usage_deref_unlocked(dev, bo);
+ if (ret)
+ return ret;
+ mutex_lock(&dev->struct_mutex);
+ } while (1);
+
+ if (!node) {
+ DRM_ERROR("Out of videoram / aperture space\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ node = drm_mm_get_block(node, size, buf->page_alignment);
+ mutex_unlock(&dev->struct_mutex);
+ BUG_ON(!node);
+ node->private = (void *)buf;
+
+ if (mem_type == DRM_BO_MEM_TT) {
+ buf->node_ttm = node;
+ } else {
+ buf->node_card = node;
+ }
+ buf->offset = node->start * PAGE_SIZE;
+ return 0;
+}
+
+static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
+{
+ drm_device_t *dev = bo->dev;
+ drm_ttm_backend_t *be;
+ int ret;
+
+ if (!(bo->node_ttm && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
+ BUG_ON(bo->node_ttm);
+ ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
+ if (ret)
+ return ret;
+ }
+
+ DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->node_ttm->start);
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
+ bo->node_ttm->start);
+ if (ret) {
+ drm_mm_put_block(bo->node_ttm);
+ bo->node_ttm = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret) {
+ return ret;
+ }
+
+ be = bo->ttm->be;
+ if (be->needs_ub_cache_adjust(be))
+ bo->flags &= ~DRM_BO_FLAG_CACHED;
+ bo->flags &= ~DRM_BO_MASK_MEM;
+ bo->flags |= DRM_BO_FLAG_MEM_TT;
+
+ if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
+ ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
+ if (ret)
+ DRM_ERROR("Could not flush read caches\n");
+ }
+ DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
+
+ return 0;
+}
+
+static int drm_bo_new_flags(drm_device_t * dev,
+ uint32_t flags, uint32_t new_mask, uint32_t hint,
+ int init, uint32_t * n_flags, uint32_t * n_mask)
+{
+ uint32_t new_flags = 0;
+ uint32_t new_props;
+ drm_bo_driver_t *driver = dev->driver->bo_driver;
+ drm_buffer_manager_t *bm = &dev->bm;
+ unsigned i;
+
+ /*
+ * First adjust the mask to take away nonexistent memory types.
+ */
+
+ for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
+ if (!bm->use_type[i])
+ new_mask &= ~drm_bo_type_flags(i);
+ }
+
+ if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+ DRM_ERROR
+ ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
+ "processes\n");
+ return -EPERM;
+ }
+ if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
+ if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
+ !driver->cached[DRM_BO_MEM_TT]) &&
+ ((new_mask & DRM_BO_FLAG_MEM_VRAM)
+ && !driver->cached[DRM_BO_MEM_VRAM])) {
+ new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
+ } else {
+ if (!driver->cached[DRM_BO_MEM_TT])
+ new_flags &= DRM_BO_FLAG_MEM_TT;
+ if (!driver->cached[DRM_BO_MEM_VRAM])
+ new_flags &= DRM_BO_FLAG_MEM_VRAM;
+ }
+ }
+
+ if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
+ !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
+ if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
+ !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
+ DRM_ERROR
+ ("Cannot read cached from a pinned VRAM / TT buffer\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Determine new memory location:
+ */
+
+ if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {
+
+ new_flags = new_mask & DRM_BO_MASK_MEM;
+
+ if (!new_flags) {
+ DRM_ERROR("Invalid buffer object memory flags\n");
+ return -EINVAL;
+ }
+
+ if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
+ if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
+ new_flags & (DRM_BO_FLAG_MEM_VRAM |
+ DRM_BO_FLAG_MEM_TT)) {
+ new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
+ } else {
+ new_flags = DRM_BO_FLAG_MEM_LOCAL;
+ }
+ }
+ if (new_flags & DRM_BO_FLAG_MEM_TT) {
+ if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
+ new_flags & DRM_BO_FLAG_MEM_VRAM) {
+ new_flags = DRM_BO_FLAG_MEM_VRAM;
+ } else {
+ new_flags = DRM_BO_FLAG_MEM_TT;
+ }
+ }
+ } else {
+ new_flags = flags & DRM_BO_MASK_MEM;
+ }
+
+ new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_READ);
+
+ if (!new_props) {
+ DRM_ERROR("Invalid buffer object rwx properties\n");
+ return -EINVAL;
+ }
+
+ new_flags |= new_mask & ~DRM_BO_MASK_MEM;
+
+ if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
+ (new_flags & DRM_BO_FLAG_NO_EVICT) &&
+ (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
+ if (!(flags & DRM_BO_FLAG_CACHED)) {
+ DRM_ERROR
+ ("Cannot change caching policy of pinned buffer\n");
+ return -EINVAL;
+ } else {
+ new_flags &= ~DRM_BO_FLAG_CACHED;
+ }
+ }
+
+ *n_flags = new_flags;
+ *n_mask = new_mask;
+ return 0;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ */
+
+drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
+ uint32_t handle, int check_owner)
+{
+ drm_user_object_t *uo;
+ drm_buffer_object_t *bo;
+
+ uo = drm_lookup_user_object(priv, handle);
+
+ if (!uo || (uo->type != drm_buffer_type)) {
+ DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
+ return NULL;
+ }
+
+ if (check_owner && priv != uo->owner) {
+ if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
+ return NULL;
+ }
+
+ bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
+ atomic_inc(&bo->usage);
+ return bo;
+}
+
+/*
+ * Call bo->mutex locked.
+ * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
+ * Unlike drm_bo_busy(), this doesn't do any fence flushing.
+ */
+
+static int drm_bo_quick_busy(drm_buffer_object_t * bo)
+{
+ drm_fence_object_t *fence = bo->fence;
+
+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+ if (fence) {
+ drm_device_t *dev = bo->dev;
+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ bo->fence = NULL;
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Call bo->mutex locked.
+ * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
+ */
+
+static int drm_bo_busy(drm_buffer_object_t * bo)
+{
+ drm_fence_object_t *fence = bo->fence;
+
+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+ if (fence) {
+ drm_device_t *dev = bo->dev;
+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ bo->fence = NULL;
+ return 0;
+ }
+ drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ bo->fence = NULL;
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+static int drm_bo_read_cached(drm_buffer_object_t * bo)
+{
+ int ret = 0;
+
+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+ if (bo->node_card)
+ ret = drm_bo_evict(bo, DRM_BO_MEM_VRAM, 1, 0);
+ if (ret)
+ return ret;
+ if (bo->node_ttm)
+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
+ return ret;
+}
+
+/*
+ * Wait until a buffer is unmapped.
+ */
+
+static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
+{
+ int ret = 0;
+
+ if ((atomic_read(&bo->mapped) >= 0) && no_wait)
+ return -EBUSY;
+
+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
+ atomic_read(&bo->mapped) == -1);
+
+ if (ret == -EINTR)
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
+{
+ int ret;
+
+ mutex_lock(&bo->mutex);
+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+ mutex_unlock(&bo->mutex);
+ return ret;
+}
+
+/*
+ * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
+ * Until then, we cannot really do anything with it except delete it.
+ * The unfenced list is a PITA, and the operations
+ * 1) validating
+ * 2) submitting commands
+ * 3) fencing
+ * should really be one atomic operation.
+ * We now "solve" this problem by keeping
+ * the buffer "unfenced" after validating, but before fencing.
+ */
+
+static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
+ int eagain_if_wait)
+{
+ int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+ unsigned long _end = jiffies + 3 * DRM_HZ;
+
+ if (ret && no_wait)
+ return -EBUSY;
+ else if (!ret)
+ return 0;
+
+ do {
+ mutex_unlock(&bo->mutex);
+ DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
+ !drm_bo_check_unfenced(bo));
+ mutex_lock(&bo->mutex);
+ if (ret == -EINTR)
+ return -EAGAIN;
+ if (ret) {
+ DRM_ERROR
+ ("Error waiting for buffer to become fenced\n");
+ return ret;
+ }
+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+ } while (ret && !time_after_eq(jiffies, _end));
+ if (ret) {
+ DRM_ERROR("Timeout waiting for buffer to become fenced\n");
+ return ret;
+ }
+ if (eagain_if_wait)
+ return -EAGAIN;
+
+ return 0;
+}
+
+/*
+ * Fill in the ioctl reply argument with buffer info.
+ * Bo locked.
+ */
+
+static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
+ drm_bo_arg_reply_t * rep)
+{
+ rep->handle = bo->base.hash.key;
+ rep->flags = bo->flags;
+ rep->size = bo->num_pages * PAGE_SIZE;
+ rep->offset = bo->offset;
+
+ if (bo->ttm_object) {
+ rep->arg_handle = bo->ttm_object->map_list.user_token;
+ } else {
+ rep->arg_handle = 0;
+ }
+
+ rep->mask = bo->mask;
+ rep->buffer_start = bo->buffer_start;
+ rep->fence_flags = bo->fence_type;
+ rep->rep_flags = 0;
+ rep->page_alignment = bo->page_alignment;
+
+ if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
+ DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
+ DRM_BO_REP_BUSY);
+ }
+}
+
+/*
+ * Wait for buffer idle and register that we've mapped the buffer.
+ * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
+ * so that if the client dies, the mapping is automatically
+ * unregistered.
+ */
+
+static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
+ uint32_t map_flags, unsigned hint,
+ drm_bo_arg_reply_t * rep)
+{
+ drm_buffer_object_t *bo;
+ drm_device_t *dev = priv->head->dev;
+ int ret = 0;
+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+
+ mutex_lock(&dev->struct_mutex);
+ bo = drm_lookup_buffer_object(priv, handle, 1);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (!bo)
+ return -EINVAL;
+
+ mutex_lock(&bo->mutex);
+ if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+ if (ret)
+ goto out;
+ }
+
+ /*
+ * If this returns true, we are currently unmapped.
+ * We need to do this test, because unmapping can
+ * be done without the bo->mutex held.
+ */
+
+ while (1) {
+ if (atomic_inc_and_test(&bo->mapped)) {
+ if (no_wait && drm_bo_busy(bo)) {
+ atomic_dec(&bo->mapped);
+ ret = -EBUSY;
+ goto out;
+ }
+ ret = drm_bo_wait(bo, 0, 0, no_wait);
+ if (ret) {
+ atomic_dec(&bo->mapped);
+ goto out;
+ }
+
+ if ((map_flags & DRM_BO_FLAG_READ) &&
+ (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
+ (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+ drm_bo_read_cached(bo);
+ }
+ break;
+ } else if ((map_flags & DRM_BO_FLAG_READ) &&
+ (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
+ (!(bo->flags & DRM_BO_FLAG_CACHED))) {
+
+ /*
+ * We are already mapped with different flags.
+ * need to wait for unmap.
+ */
+
+ ret = drm_bo_wait_unmapped(bo, no_wait);
+ if (ret)
+ goto out;
+
+ continue;
+ }
+ break;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret) {
+ if (atomic_add_negative(-1, &bo->mapped))
+ DRM_WAKEUP(&bo->event_queue);
+
+ } else
+ drm_bo_fill_rep_arg(bo, rep);
+ out:
+ mutex_unlock(&bo->mutex);
+ drm_bo_usage_deref_unlocked(dev, bo);
+ return ret;
+}
+
+static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_buffer_object_t *bo;
+ drm_ref_object_t *ro;
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+
+ bo = drm_lookup_buffer_object(priv, handle, 1);
+ if (!bo) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+ if (!ro) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ drm_remove_ref_object(priv, ro);
+ drm_bo_usage_deref_locked(dev, bo);
+ out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/*
+ * Call dev->struct_mutex locked.
+ */
+
+static void drm_buffer_user_object_unmap(drm_file_t * priv,
+ drm_user_object_t * uo,
+ drm_ref_t action)
+{
+ drm_buffer_object_t *bo =
+ drm_user_object_entry(uo, drm_buffer_object_t, base);
+
+ /*
+ * We DON'T want to take the bo->lock here, because we want to
+ * hold it when we wait for unmapped buffer.
+ */
+
+ BUG_ON(action != _DRM_REF_TYPE1);
+
+ if (atomic_add_negative(-1, &bo->mapped))
+ DRM_WAKEUP(&bo->event_queue);
+}
+
+/*
+ * bo->mutex locked.
+ */
+
+static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
+ int no_wait, int force_no_move)
+{
+ int ret = 0;
+
+ /*
+ * Flush outstanding fences.
+ */
+ drm_bo_busy(bo);
+
+ /*
+ * Make sure we're not mapped.
+ */
+
+ ret = drm_bo_wait_unmapped(bo, no_wait);
+ if (ret)
+ return ret;
+
+ /*
+ * Wait for outstanding fences.
+ */
+
+ ret = drm_bo_wait(bo, 0, 0, no_wait);
+
+ if (ret == -EINTR)
+ return -EAGAIN;
+ if (ret)
+ return ret;
+
+ if (new_flags & DRM_BO_FLAG_MEM_TT) {
+ ret = drm_move_local_to_tt(bo, no_wait);
+ if (ret)
+ return ret;
+ } else {
+ drm_move_tt_to_local(bo, 0, force_no_move);
+ }
+
+ return 0;
+}
+
+/*
+ * bo locked.
+ */
+
+static int drm_buffer_object_validate(drm_buffer_object_t * bo,
+ uint32_t new_flags,
+ int move_unfenced, int no_wait)
+{
+ drm_device_t *dev = bo->dev;
+ drm_buffer_manager_t *bm = &dev->bm;
+ uint32_t flag_diff = (new_flags ^ bo->flags);
+ drm_bo_driver_t *driver = dev->driver->bo_driver;
+
+ int ret;
+
+ if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
+ DRM_ERROR("Vram support not implemented yet\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
+ ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
+ if (ret) {
+ DRM_ERROR("Driver did not support given buffer permissions\n");
+ return ret;
+ }
+
+ /*
+ * Move out if we need to change caching policy.
+ */
+
+ if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
+ !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
+ if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+ DRM_ERROR("Cannot change caching policy of "
+ "pinned buffer.\n");
+ return -EINVAL;
+ }
+ ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
+ if (ret) {
+ if (ret != -EAGAIN)
+ DRM_ERROR("Failed moving buffer.\n");
+ return ret;
+ }
+ }
+ DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
+ flag_diff = (new_flags ^ bo->flags);
+
+ /*
+ * Check whether we dropped no_move policy, and in that case,
+ * release reserved manager regions.
+ */
+
+ if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
+ !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
+ mutex_lock(&dev->struct_mutex);
+ if (bo->node_ttm) {
+ drm_mm_put_block(bo->node_ttm);
+ bo->node_ttm = NULL;
+ }
+ if (bo->node_card) {
+ drm_mm_put_block(bo->node_card);
+ bo->node_card = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ /*
+ * Check whether we need to move buffer.
+ */
+
+ if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
+ ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
+ if (ret) {
+ if (ret != -EAGAIN)
+ DRM_ERROR("Failed moving buffer.\n");
+ return ret;
+ }
+ }
+
+ if (move_unfenced) {
+
+ /*
+ * Place on unfenced list.
+ */
+
+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+ _DRM_BO_FLAG_UNFENCED);
+ mutex_lock(&dev->struct_mutex);
+ list_del(&bo->lru_ttm);
+ list_add_tail(&bo->lru_ttm, &bm->unfenced);
+ list_del_init(&bo->lru_card);
+ mutex_unlock(&dev->struct_mutex);
+ } else {
+
+ mutex_lock(&dev->struct_mutex);
+ list_del_init(&bo->lru_ttm);
+ list_del_init(&bo->lru_card);
+ drm_bo_add_to_lru(bo, bm);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ bo->flags = new_flags;
+ return 0;
+}
+
+static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
+ uint32_t flags, uint32_t mask, uint32_t hint,
+ drm_bo_arg_reply_t * rep)
+{
+ drm_buffer_object_t *bo;
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+ uint32_t new_flags;
+
+ bo = drm_lookup_buffer_object(priv, handle, 1);
+ if (!bo) {
+ return -EINVAL;
+ }
+
+ mutex_lock(&bo->mutex);
+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+
+ if (ret)
+ goto out;
+
+ ret = drm_bo_new_flags(dev, bo->flags,
+ (flags & mask) | (bo->mask & ~mask), hint,
+ 0, &new_flags, &bo->mask);
+
+ if (ret)
+ goto out;
+
+ ret =
+ drm_buffer_object_validate(bo, new_flags,
+ !(hint & DRM_BO_HINT_DONT_FENCE),
+ no_wait);
+ drm_bo_fill_rep_arg(bo, rep);
+
+ out:
+
+ mutex_unlock(&bo->mutex);
+ drm_bo_usage_deref_unlocked(dev, bo);
+ return ret;
+}
+
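drm_bo_handle_validate() above merges the caller's request into the buffer's previously requested mask with (flags & mask) | (bo->mask & ~mask): bits selected by mask are taken from the new request, everything else is preserved. A tiny stand-alone illustration of that bit merge (the values are invented):

#include <stdio.h>
#include <stdint.h>

/* Bits selected by mask come from the new value; the rest keep the old one. */
static uint32_t merge_flags(uint32_t old, uint32_t new, uint32_t mask)
{
        return (new & mask) | (old & ~mask);
}

int main(void)
{
        uint32_t old = 0x13, new = 0x04, mask = 0x07;

        printf("0x%08x\n", merge_flags(old, new, mask));  /* 0x00000014 */
        return 0;
}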
+static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
+ drm_bo_arg_reply_t * rep)
+{
+ drm_buffer_object_t *bo;
+
+ bo = drm_lookup_buffer_object(priv, handle, 1);
+ if (!bo) {
+ return -EINVAL;
+ }
+ mutex_lock(&bo->mutex);
+ if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+ (void)drm_bo_busy(bo);
+ drm_bo_fill_rep_arg(bo, rep);
+ mutex_unlock(&bo->mutex);
+ drm_bo_usage_deref_unlocked(bo->dev, bo);
+ return 0;
+}
+
+static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
+ uint32_t hint, drm_bo_arg_reply_t * rep)
+{
+ drm_buffer_object_t *bo;
+ int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+ int ret;
+
+ bo = drm_lookup_buffer_object(priv, handle, 1);
+ if (!bo) {
+ return -EINVAL;
+ }
+
+ mutex_lock(&bo->mutex);
+ ret = drm_bo_wait_unfenced(bo, no_wait, 0);
+ if (ret)
+ goto out;
+ ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
+ if (ret)
+ goto out;
+
+ drm_bo_fill_rep_arg(bo, rep);
+
+ out:
+ mutex_unlock(&bo->mutex);
+ drm_bo_usage_deref_unlocked(bo->dev, bo);
+ return ret;
+}
+
+/*
+ * Call bo->mutex locked.
+ */
+
+static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
+{
+ drm_device_t *dev = bo->dev;
+ drm_ttm_object_t *to = NULL;
+ int ret = 0;
+ uint32_t ttm_flags = 0;
+
+ bo->ttm_object = NULL;
+ bo->ttm = NULL;
+
+ switch (bo->type) {
+ case drm_bo_type_dc:
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
+ ttm_flags, &to);
+ mutex_unlock(&dev->struct_mutex);
+ break;
+ case drm_bo_type_user:
+ case drm_bo_type_fake:
+ break;
+ default:
+ DRM_ERROR("Illegal buffer object type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ return ret;
+ }
+
+ if (to) {
+ bo->ttm_object = to;
+ bo->ttm = drm_ttm_from_object(to);
+ }
+ return ret;
+}
+
+int drm_buffer_object_create(drm_file_t * priv,
+ unsigned long size,
+ drm_bo_type_t type,
+ uint32_t mask,
+ uint32_t hint,
+ uint32_t page_alignment,
+ unsigned long buffer_start,
+ drm_buffer_object_t ** buf_obj)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_buffer_manager_t *bm = &dev->bm;
+ drm_buffer_object_t *bo;
+ int ret = 0;
+ uint32_t new_flags;
+ unsigned long num_pages;
+
+ if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
+ DRM_ERROR("Invalid buffer object start.\n");
+ return -EINVAL;
+ }
+ num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (num_pages == 0) {
+ DRM_ERROR("Illegal buffer object size.\n");
+ return -EINVAL;
+ }
+
+ bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
+
+ if (!bo)
+ return -ENOMEM;
+
+ mutex_init(&bo->mutex);
+ mutex_lock(&bo->mutex);
+
+ atomic_set(&bo->usage, 1);
+ atomic_set(&bo->mapped, -1);
+ DRM_INIT_WAITQUEUE(&bo->event_queue);
+ INIT_LIST_HEAD(&bo->lru_ttm);
+ INIT_LIST_HEAD(&bo->lru_card);
+ INIT_LIST_HEAD(&bo->ddestroy);
+ bo->dev = dev;
+ bo->type = type;
+ bo->num_pages = num_pages;
+ bo->node_card = NULL;
+ bo->node_ttm = NULL;
+ bo->page_alignment = page_alignment;
+ if (bo->type == drm_bo_type_fake) {
+ bo->offset = buffer_start;
+ bo->buffer_start = 0;
+ } else {
+ bo->buffer_start = buffer_start;
+ }
+ bo->priv_flags = 0;
+ bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+ atomic_inc(&bm->count);
+ ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
+ 1, &new_flags, &bo->mask);
+ if (ret)
+ goto out_err;
+ ret = drm_bo_add_ttm(priv, bo);
+ if (ret)
+ goto out_err;
+
+ ret = drm_buffer_object_validate(bo, new_flags, 0,
+ hint & DRM_BO_HINT_DONT_BLOCK);
+ if (ret)
+ goto out_err;
+
+ mutex_unlock(&bo->mutex);
+ *buf_obj = bo;
+ return 0;
+
+ out_err:
+ mutex_unlock(&bo->mutex);
+ drm_bo_usage_deref_unlocked(dev, bo);
+ return ret;
+}
+
+static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
+ int shareable)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_add_user_object(priv, &bo->base, shareable);
+ if (ret)
+ goto out;
+
+ bo->base.remove = drm_bo_base_deref_locked;
+ bo->base.type = drm_buffer_type;
+ bo->base.ref_struct_locked = NULL;
+ bo->base.unref = drm_buffer_user_object_unmap;
+
+ out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
+{
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ return 0;
+}
+
+int drm_bo_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_bo_arg_t arg;
+ drm_bo_arg_request_t *req = &arg.d.req;
+ drm_bo_arg_reply_t rep;
+ unsigned long next;
+ drm_user_object_t *uo;
+ drm_buffer_object_t *entry;
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ do {
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ if (arg.handled) {
+ data = arg.next;
+ continue;
+ }
+
+ rep.ret = 0;
+ switch (req->op) {
+ case drm_bo_create:
+ rep.ret =
+ drm_buffer_object_create(priv, req->size,
+ req->type,
+ req->mask,
+ req->hint,
+ req->page_alignment,
+ req->buffer_start, &entry);
+ if (rep.ret)
+ break;
+
+ rep.ret =
+ drm_bo_add_user_object(priv, entry,
+ req->
+ mask &
+ DRM_BO_FLAG_SHAREABLE);
+ if (rep.ret)
+ drm_bo_usage_deref_unlocked(dev, entry);
+
+ if (rep.ret)
+ break;
+
+ mutex_lock(&entry->mutex);
+ drm_bo_fill_rep_arg(entry, &rep);
+ mutex_unlock(&entry->mutex);
+ break;
+ case drm_bo_unmap:
+ rep.ret = drm_buffer_object_unmap(priv, req->handle);
+ break;
+ case drm_bo_map:
+ rep.ret = drm_buffer_object_map(priv, req->handle,
+ req->mask,
+ req->hint, &rep);
+ break;
+ case drm_bo_destroy:
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, req->handle);
+ if (!uo || (uo->type != drm_buffer_type)
+ || uo->owner != priv) {
+ mutex_unlock(&dev->struct_mutex);
+ rep.ret = -EINVAL;
+ break;
+ }
+ rep.ret = drm_remove_user_object(priv, uo);
+ mutex_unlock(&dev->struct_mutex);
+ break;
+ case drm_bo_reference:
+ rep.ret = drm_user_object_ref(priv, req->handle,
+ drm_buffer_type, &uo);
+ if (rep.ret)
+ break;
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, req->handle);
+ entry =
+ drm_user_object_entry(uo, drm_buffer_object_t,
+ base);
+ atomic_dec(&entry->usage);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&entry->mutex);
+ drm_bo_fill_rep_arg(entry, &rep);
+ mutex_unlock(&entry->mutex);
+ break;
+ case drm_bo_unreference:
+ rep.ret = drm_user_object_unref(priv, req->handle,
+ drm_buffer_type);
+ break;
+ case drm_bo_validate:
+ rep.ret = drm_bo_lock_test(dev, filp);
+
+ if (rep.ret)
+ break;
+ rep.ret =
+ drm_bo_handle_validate(priv, req->handle, req->mask,
+ req->arg_handle, req->hint,
+ &rep);
+ break;
+ case drm_bo_fence:
+ rep.ret = drm_bo_lock_test(dev, filp);
+ if (rep.ret)
+ break;
+ /**/ break;
+ case drm_bo_info:
+ rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
+ break;
+ case drm_bo_wait_idle:
+ rep.ret = drm_bo_handle_wait(priv, req->handle,
+ req->hint, &rep);
+ break;
+ case drm_bo_ref_fence:
+ rep.ret = -EINVAL;
+ DRM_ERROR("Function is not implemented yet.\n");
+ break;
+ default:
+ rep.ret = -EINVAL;
+ }
+ next = arg.next;
+
+ /*
+ * A signal interrupted us. Make sure the ioctl is restartable.
+ */
+
+ if (rep.ret == -EAGAIN)
+ return -EAGAIN;
+
+ arg.handled = 1;
+ arg.d.rep = rep;
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ data = next;
+ } while (data);
+ return 0;
+}
+
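drm_bo_ioctl() above walks a user-space chain of drm_bo_arg_t blocks, following arg.next until it reads zero and writing each reply back in place. A hedged user-space sketch of driving that interface (not part of the patch); it assumes the drm.h from this tree is on the include path, that drm_fd is an open, authenticated DRM file descriptor, and that the flag choice is purely illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Create two buffer objects with a single DRM_IOCTL_BUFOBJ call. */
static int create_two_buffers(int drm_fd, unsigned long size)
{
        drm_bo_arg_t args[2];
        int i;

        memset(args, 0, sizeof(args));
        for (i = 0; i < 2; i++) {
                args[i].d.req.op = drm_bo_create;
                args[i].d.req.type = drm_bo_type_dc;
                args[i].d.req.size = size;
                args[i].d.req.mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_MEM_TT;
        }
        /* Chain the requests; the kernel stops at a zero next pointer. */
        args[0].next = (unsigned long) &args[1];
        args[1].next = 0;

        if (ioctl(drm_fd, DRM_IOCTL_BUFOBJ, &args[0]))
                return -1;

        for (i = 0; i < 2; i++)
                if (args[i].d.rep.ret)
                        return args[i].d.rep.ret;
        return 0;
}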
+/*
+ * dev->struct_mutex locked.
+ */
+
+static int drm_bo_force_list_clean(drm_device_t * dev,
+ struct list_head *head,
+ unsigned mem_type,
+ int force_no_move, int allow_errors)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ struct list_head *list, *next, *prev;
+ drm_buffer_object_t *entry;
+ int ret;
+ int clean;
+
+ retry:
+ clean = 1;
+ list_for_each_safe(list, next, head) {
+ prev = list->prev;
+ entry = drm_bo_entry(list, mem_type);
+ atomic_inc(&entry->usage);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&entry->mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ if (prev != list->prev || next != list->next) {
+ mutex_unlock(&entry->mutex);
+ drm_bo_usage_deref_locked(dev, entry);
+ goto retry;
+ }
+ if (drm_bo_mm_node(entry, mem_type)) {
+ clean = 0;
+
+ /*
+ * Expire the fence.
+ */
+
+ mutex_unlock(&dev->struct_mutex);
+ if (entry->fence && bm->nice_mode) {
+ unsigned long _end = jiffies + 3 * DRM_HZ;
+ do {
+ ret = drm_bo_wait(entry, 0, 1, 0);
+ if (ret && allow_errors) {
+ if (ret == -EINTR)
+ ret = -EAGAIN;
+ goto out_err;
+ }
+ } while (ret && !time_after_eq(jiffies, _end));
+
+ if (entry->fence) {
+ bm->nice_mode = 0;
+ DRM_ERROR("Detected GPU hang or "
+ "fence manager was taken down. "
+ "Evicting waiting buffers\n");
+ }
+ }
+ if (entry->fence) {
+ drm_fence_usage_deref_unlocked(dev,
+ entry->fence);
+ entry->fence = NULL;
+ }
+
+ DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
+ 0);
+
+ if (force_no_move) {
+ DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
+ 0);
+ }
+ if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
+ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
+ "cleanup. Removing flag and evicting.\n");
+ entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
+ entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
+ }
+
+ ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
+ if (ret) {
+ if (allow_errors) {
+ goto out_err;
+ } else {
+ DRM_ERROR("Aargh. Eviction failed.\n");
+ }
+ }
+ mutex_lock(&dev->struct_mutex);
+ }
+ mutex_unlock(&entry->mutex);
+ drm_bo_usage_deref_locked(dev, entry);
+ if (prev != list->prev || next != list->next) {
+ goto retry;
+ }
+ }
+ if (!clean)
+ goto retry;
+ return 0;
+ out_err:
+ mutex_unlock(&entry->mutex);
+ drm_bo_usage_deref_unlocked(dev, entry);
+ mutex_lock(&dev->struct_mutex);
+ return ret;
+}
+
+int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ int ret = -EINVAL;
+
+ if (mem_type >= DRM_BO_MEM_TYPES) {
+ DRM_ERROR("Illegal memory type %d\n", mem_type);
+ return ret;
+ }
+
+ if (!bm->has_type[mem_type]) {
+ DRM_ERROR("Trying to take down uninitialized "
+ "memory manager type\n");
+ return ret;
+ }
+ bm->use_type[mem_type] = 0;
+ bm->has_type[mem_type] = 0;
+
+ ret = 0;
+ if (mem_type > 0) {
+
+ /*
+ * Throw out unfenced buffers.
+ */
+
+ drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);
+
+ /*
+ * Throw out evicted no-move buffers.
+ */
+
+ drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
+ mem_type, 1, 0);
+ drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
+ 0);
+ drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
+ 0);
+
+ if (drm_mm_clean(&bm->manager[mem_type])) {
+ drm_mm_takedown(&bm->manager[mem_type]);
+ } else {
+ ret = -EBUSY;
+ }
+ }
+
+ return ret;
+}
+
+static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
+{
+ int ret;
+ drm_buffer_manager_t *bm = &dev->bm;
+
+ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
+ DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
+ return -EINVAL;
+ }
+
+ ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
+ if (ret)
+ return ret;
+ ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
+ if (ret)
+ return ret;
+ ret =
+ drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
+ return ret;
+}
+
+static int drm_bo_init_mm(drm_device_t * dev,
+ unsigned type,
+ unsigned long p_offset, unsigned long p_size)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ int ret = -EINVAL;
+
+ if (type >= DRM_BO_MEM_TYPES) {
+ DRM_ERROR("Illegal memory type %d\n", type);
+ return ret;
+ }
+ if (bm->has_type[type]) {
+ DRM_ERROR("Memory manager already initialized for type %d\n",
+ type);
+ return ret;
+ }
+
+ ret = 0;
+ if (type != DRM_BO_MEM_LOCAL) {
+ if (!p_size) {
+ DRM_ERROR("Zero size memory manager type %d\n", type);
+ return ret;
+ }
+ ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
+ if (ret)
+ return ret;
+ }
+ bm->has_type[type] = 1;
+ bm->use_type[type] = 1;
+
+ INIT_LIST_HEAD(&bm->lru[type]);
+ INIT_LIST_HEAD(&bm->pinned[type]);
+
+ return 0;
+}
+
+/*
+ * This is called from lastclose, so we don't need to worry about
+ * any clients still running when we set the initialized flag to zero.
+ */
+
+int drm_bo_driver_finish(drm_device_t * dev)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ int ret = 0;
+ unsigned i = DRM_BO_MEM_TYPES;
+
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ if (!bm->initialized)
+ goto out;
+ bm->initialized = 0;
+
+ while (i--) {
+ if (bm->has_type[i]) {
+ bm->use_type[i] = 0;
+ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
+ ret = -EBUSY;
+ DRM_ERROR("DRM memory manager type %d "
+ "is not clean.\n", i);
+ }
+ bm->has_type[i] = 0;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+ if (!cancel_delayed_work(&bm->wq)) {
+ flush_scheduled_work();
+ }
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_delayed_delete(dev, 1);
+ if (list_empty(&bm->ddestroy)) {
+ DRM_DEBUG("Delayed destroy list was clean\n");
+ }
+ if (list_empty(&bm->lru[0])) {
+ DRM_DEBUG("Swap list was clean\n");
+ }
+ if (list_empty(&bm->pinned[0])) {
+ DRM_DEBUG("NO_MOVE list was clean\n");
+ }
+ if (list_empty(&bm->unfenced)) {
+ DRM_DEBUG("Unfenced list was clean\n");
+ }
+ out:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ return ret;
+}
+
+int drm_bo_driver_init(drm_device_t * dev)
+{
+ drm_bo_driver_t *driver = dev->driver->bo_driver;
+ drm_buffer_manager_t *bm = &dev->bm;
+ int ret = -EINVAL;
+
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ if (!driver)
+ goto out_unlock;
+
+ /*
+ * Initialize the system memory buffer type.
+ * Other types need to be driver / IOCTL initialized.
+ */
+
+ ret = drm_bo_init_mm(dev, 0, 0, 0);
+ if (ret)
+ goto out_unlock;
+
+ INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
+ bm->initialized = 1;
+ bm->nice_mode = 1;
+ atomic_set(&bm->count, 0);
+ bm->cur_pages = 0;
+ INIT_LIST_HEAD(&bm->unfenced);
+ INIT_LIST_HEAD(&bm->ddestroy);
+ out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ return ret;
+}
+
+EXPORT_SYMBOL(drm_bo_driver_init);
+
+int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+
+ int ret = 0;
+ drm_mm_init_arg_t arg;
+ drm_buffer_manager_t *bm = &dev->bm;
+ drm_bo_driver_t *driver = dev->driver->bo_driver;
+
+ if (!driver) {
+ DRM_ERROR("Buffer objects are not supported by this driver\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+
+ switch (arg.req.op) {
+ case mm_init:
+ ret = -EINVAL;
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ if (!bm->initialized) {
+ DRM_ERROR("DRM memory manager was not initialized.\n");
+ break;
+ }
+ if (arg.req.mem_type == 0) {
+ DRM_ERROR
+ ("System memory buffers already initialized.\n");
+ break;
+ }
+ ret = drm_bo_init_mm(dev, arg.req.mem_type,
+ arg.req.p_offset, arg.req.p_size);
+ break;
+ case mm_takedown:
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = -EINVAL;
+ if (!bm->initialized) {
+ DRM_ERROR("DRM memory manager was not initialized\n");
+ break;
+ }
+ if (arg.req.mem_type == 0) {
+ DRM_ERROR("No takedown for System memory buffers.\n");
+ break;
+ }
+ ret = 0;
+ if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
+ DRM_ERROR("Memory manager type %d not clean. "
+ "Delaying takedown\n", arg.req.mem_type);
+ }
+ break;
+ case mm_lock:
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_bo_lock_mm(dev, arg.req.mem_type);
+ break;
+ case mm_unlock:
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = 0;
+ break;
+ default:
+ DRM_ERROR("Function not implemented yet\n");
+ return -EINVAL;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ if (ret)
+ return ret;
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return 0;
+}
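A matching user-space sketch for the DRM_IOCTL_MM_INIT path handled above (not part of the patch). It assumes this tree's drm.h is on the include path; the memory type index and the offset/size of the managed range are whatever the driver expects, so the values are left to the caller:

#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Ask the kernel to set up the manager for one non-local memory type. */
static int init_memory_type(int drm_fd, unsigned mem_type,
                            unsigned long p_offset, unsigned long p_size)
{
        drm_mm_init_arg_t arg;

        memset(&arg, 0, sizeof(arg));
        arg.req.op = mm_init;
        arg.req.mem_type = mem_type;    /* must not be 0 (system memory) */
        arg.req.p_offset = p_offset;    /* start of the managed range */
        arg.req.p_size = p_size;        /* size of the managed range */

        return ioctl(drm_fd, DRM_IOCTL_MM_INIT, &arg);
}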
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index 2eeb401d..d6ebc8d1 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -65,8 +65,8 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
return NULL;
}
-int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
- unsigned long user_token, int hashed_handle)
+static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
+ unsigned long user_token, int hashed_handle)
{
int use_hashed_handle;
@@ -78,14 +78,16 @@ int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
#error Unsupported long size. Neither 64 nor 32 bits.
#endif
- if (use_hashed_handle) {
- return drm_ht_just_insert_please(&dev->map_hash, hash,
- user_token, 32 - PAGE_SHIFT - 3,
- PAGE_SHIFT, DRM_MAP_HASH_OFFSET);
- } else {
- hash->key = user_token;
- return drm_ht_insert_item(&dev->map_hash, hash);
+ if (!use_hashed_handle) {
+ int ret;
+ hash->key = user_token >> PAGE_SHIFT;
+ ret = drm_ht_insert_item(&dev->map_hash, hash);
+ if (ret != -EINVAL)
+ return ret;
}
+ return drm_ht_just_insert_please(&dev->map_hash, hash,
+ user_token, 32 - PAGE_SHIFT - 3,
+ 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
/**
@@ -290,16 +292,16 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
map->offset;
- ret = drm_map_handle(dev, &list->hash, user_token, 0);
+ ret = drm_map_handle(dev, &list->hash, user_token, 0);
if (ret) {
- drm_free(map, sizeof(*map), DRM_MEM_MAPS);
- drm_free(list, sizeof(*list), DRM_MEM_MAPS);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
- list->user_token = list->hash.key;
+ list->user_token = list->hash.key << PAGE_SHIFT;
mutex_unlock(&dev->struct_mutex);
*maplist = list;
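The hunks above store map hash keys page-shifted and shift them back when filling in user_token. A tiny stand-alone illustration of that round trip (the page shift value is made up):

#include <stdio.h>

#define FAKE_PAGE_SHIFT 12      /* stands in for the kernel's PAGE_SHIFT */

int main(void)
{
        unsigned long user_token = 0x10002000ul;            /* page-aligned map offset */
        unsigned long key = user_token >> FAKE_PAGE_SHIFT;  /* stored in the hash table */
        unsigned long token = key << FAKE_PAGE_SHIFT;       /* reported back as user_token */

        printf("key 0x%lx -> token 0x%lx\n", key, token);
        return 0;
}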
@@ -384,7 +386,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
if (r_list->map == map) {
list_del(list);
- drm_ht_remove_key(&dev->map_hash, r_list->user_token);
+ drm_ht_remove_key(&dev->map_hash,
+ r_list->user_token >> PAGE_SHIFT);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
break;
}
@@ -420,6 +423,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
+ case _DRM_TTM:
+ BUG_ON(1);
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
@@ -940,6 +945,9 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
request->count = entry->buf_count;
request->size = size;
+ if (request->flags & _DRM_PCI_BUFFER_RO)
+ dma->flags = _DRM_DMA_USE_PCI_RO;
+
atomic_dec(&dev->buf_alloc);
return 0;
@@ -1526,9 +1534,10 @@ int drm_freebufs(struct inode *inode, struct file *filp,
* \param arg pointer to a drm_buf_map structure.
* \return zero on success or a negative number on failure.
*
- * Maps the AGP or SG buffer region with do_mmap(), and copies information
- * about each buffer into user space. The PCI buffers are already mapped on the
- * addbufs_pci() call.
+ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
+ * drm_mmap_dma().
*/
int drm_mapbufs(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
new file mode 100644
index 00000000..b466f8bd
--- /dev/null
+++ b/linux-core/drm_compat.c
@@ -0,0 +1,434 @@
+/**************************************************************************
+ *
+ * This kernel module is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ **************************************************************************/
+/*
+ * This code provides access to unexported mm kernel features. It is needed in
+ * order to use the new DRM memory manager code with kernels that don't support
+ * it directly.
+ *
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * Linux kernel mm subsystem authors.
+ * (Most code taken from there).
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These have bad performance in the AGP module for the indicated kernel versions.
+ */
+
+int drm_map_page_into_agp(struct page *page)
+{
+ int i;
+ i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+ /* Caller's responsibility to call global_flush_tlb() for
+ * performance reasons */
+ return i;
+}
+
+int drm_unmap_page_from_agp(struct page *page)
+{
+ int i;
+ i = change_page_attr(page, 1, PAGE_KERNEL);
+ /* Caller's responsibility to call global_flush_tlb() for
+ * performance reasons */
+ return i;
+}
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+
+/*
+ * The protection map was exported in 2.6.19
+ */
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+#ifdef MODULE
+ static pgprot_t drm_protection_map[16] = {
+ __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
+ __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+ };
+
+ return drm_protection_map[vm_flags & 0x0F];
+#else
+ extern pgprot_t protection_map[];
+ return protection_map[vm_flags & 0x0F];
+#endif
+};
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * VM code for kernels below 2.6.15, in which a major VM rewrite
+ * occurred. This implements a simple, straightforward
+ * version similar to what's going to be
+ * in kernel 2.6.20+.
+ */
+
+static int drm_pte_is_clear(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int ret = 1;
+ pte_t *pte;
+ pmd_t *pmd;
+ pud_t *pud;
+ pgd_t *pgd;
+
+
+ spin_lock(&mm->page_table_lock);
+ pgd = pgd_offset(mm, addr);
+ if (pgd_none(*pgd))
+ goto unlock;
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud))
+ goto unlock;
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ goto unlock;
+ pte = pte_offset_map(pmd, addr);
+ if (!pte)
+ goto unlock;
+ ret = pte_none(*pte);
+ pte_unmap(pte);
+ unlock:
+ spin_unlock(&mm->page_table_lock);
+ return ret;
+}
+
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot)
+{
+ int ret;
+ if (!drm_pte_is_clear(vma, addr))
+ return -EBUSY;
+
+ ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
+ return ret;
+}
+
+static struct {
+ spinlock_t lock;
+ struct page *dummy_page;
+ atomic_t present;
+} drm_np_retry =
+{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
+
+struct page * get_nopage_retry(void)
+{
+ if (atomic_read(&drm_np_retry.present) == 0) {
+ struct page *page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return NOPAGE_OOM;
+ spin_lock(&drm_np_retry.lock);
+ drm_np_retry.dummy_page = page;
+ atomic_set(&drm_np_retry.present,1);
+ spin_unlock(&drm_np_retry.lock);
+ }
+ get_page(drm_np_retry.dummy_page);
+ return drm_np_retry.dummy_page;
+}
+
+void free_nopage_retry(void)
+{
+ if (atomic_read(&drm_np_retry.present) == 1) {
+ spin_lock(&drm_np_retry.lock);
+ __free_page(drm_np_retry.dummy_page);
+ drm_np_retry.dummy_page = NULL;
+ atomic_set(&drm_np_retry.present, 0);
+ spin_unlock(&drm_np_retry.lock);
+ }
+}
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type)
+{
+ struct fault_data data;
+
+ if (type)
+ *type = VM_FAULT_MINOR;
+
+ data.address = address;
+ data.vma = vma;
+ drm_vm_ttm_fault(vma, &data);
+ switch (data.type) {
+ case VM_FAULT_OOM:
+ return NOPAGE_OOM;
+ case VM_FAULT_SIGBUS:
+ return NOPAGE_SIGBUS;
+ default:
+ break;
+ }
+
+ return NOPAGE_REFAULT;
+}
+
+#endif
+
+#ifdef DRM_ODD_MM_COMPAT
+
+/*
+ * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
+ * workaround for a single BUG statement in do_no_page in these versions. The
+ * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
+ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
+ * first take the dev->struct_mutex, and then trylock all mmap_sems. If this
+ * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
+ * release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
+ * phew.
+ */
+
+typedef struct p_mm_entry {
+ struct list_head head;
+ struct mm_struct *mm;
+ atomic_t refcount;
+ int locked;
+} p_mm_entry_t;
+
+typedef struct vma_entry {
+ struct list_head head;
+ struct vm_area_struct *vma;
+} vma_entry_t;
+
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type)
+{
+ drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+ unsigned long page_offset;
+ struct page *page;
+ drm_ttm_t *ttm;
+ drm_buffer_manager_t *bm;
+ drm_device_t *dev;
+
+ /*
+ * FIXME: Check can't map aperture flag.
+ */
+
+ if (type)
+ *type = VM_FAULT_MINOR;
+
+ if (!map)
+ return NOPAGE_OOM;
+
+ if (address > vma->vm_end)
+ return NOPAGE_SIGBUS;
+
+ ttm = (drm_ttm_t *) map->offset;
+ dev = ttm->dev;
+ mutex_lock(&dev->struct_mutex);
+ drm_fixup_ttm_caching(ttm);
+ BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
+
+ bm = &dev->bm;
+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+ page = ttm->pages[page_offset];
+
+ if (!page) {
+ if (drm_alloc_memctl(PAGE_SIZE)) {
+ page = NOPAGE_OOM;
+ goto out;
+ }
+ page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
+ if (!page) {
+ drm_free_memctl(PAGE_SIZE);
+ page = NOPAGE_OOM;
+ goto out;
+ }
+ ++bm->cur_pages;
+ SetPageLocked(page);
+ }
+
+ get_page(page);
+ out:
+ mutex_unlock(&dev->struct_mutex);
+ return page;
+}
+
+
+
+
+int drm_ttm_map_bound(struct vm_area_struct *vma)
+{
+ drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
+ drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+ int ret = 0;
+
+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
+ unsigned long pfn = ttm->aper_offset +
+ (ttm->be->aperture_base >> PAGE_SHIFT);
+ pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+
+ ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
+ vma->vm_end - vma->vm_start,
+ pgprot);
+ }
+ return ret;
+}
+
+
+int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+{
+ p_mm_entry_t *entry, *n_entry;
+ vma_entry_t *v_entry;
+ drm_local_map_t *map = (drm_local_map_t *)
+ vma->vm_private_data;
+ struct mm_struct *mm = vma->vm_mm;
+
+ v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
+ if (!v_entry) {
+ DRM_ERROR("Allocation of vma pointer entry failed\n");
+ return -ENOMEM;
+ }
+ v_entry->vma = vma;
+ map->handle = (void *) v_entry;
+ list_add_tail(&v_entry->head, &ttm->vma_list);
+
+ list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ if (mm == entry->mm) {
+ atomic_inc(&entry->refcount);
+ return 0;
+ } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
+ }
+
+ n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+ if (!n_entry) {
+ DRM_ERROR("Allocation of process mm pointer entry failed\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&n_entry->head);
+ n_entry->mm = mm;
+ n_entry->locked = 0;
+ atomic_set(&n_entry->refcount, 0);
+ list_add_tail(&n_entry->head, &entry->head);
+
+ return 0;
+}
+
+void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+{
+ p_mm_entry_t *entry, *n;
+ vma_entry_t *v_entry, *v_n;
+ int found = 0;
+ struct mm_struct *mm = vma->vm_mm;
+
+ list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
+ if (v_entry->vma == vma) {
+ found = 1;
+ list_del(&v_entry->head);
+ drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+ break;
+ }
+ }
+ BUG_ON(!found);
+
+ list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
+ if (mm == entry->mm) {
+ if (atomic_add_negative(-1, &entry->refcount)) {
+ list_del(&entry->head);
+ BUG_ON(entry->locked);
+ drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
+ }
+ return;
+ }
+ }
+ BUG_ON(1);
+}
+
+
+
+int drm_ttm_lock_mm(drm_ttm_t * ttm)
+{
+ p_mm_entry_t *entry;
+ int lock_ok = 1;
+
+ list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ BUG_ON(entry->locked);
+ if (!down_write_trylock(&entry->mm->mmap_sem)) {
+ lock_ok = 0;
+ break;
+ }
+ entry->locked = 1;
+ }
+
+ if (lock_ok)
+ return 0;
+
+ list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ if (!entry->locked)
+ break;
+ up_write(&entry->mm->mmap_sem);
+ entry->locked = 0;
+ }
+
+ /*
+ * Possible deadlock. Try again. Our callers should handle this
+ * and restart.
+ */
+
+ return -EAGAIN;
+}
+
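drm_ttm_lock_mm() above uses a trylock-everything-or-back-off pattern to avoid lock-ordering deadlocks. A stand-alone sketch of the same pattern (not part of the patch), with pthread mutexes standing in for the mmap_sems:

#include <pthread.h>
#include <sched.h>

/* Try to take every lock; on failure release what we already hold. */
static int try_lock_all(pthread_mutex_t *locks, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (pthread_mutex_trylock(&locks[i])) {
                        while (i--)
                                pthread_mutex_unlock(&locks[i]);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        pthread_mutex_t locks[2] = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
        };

        /* Caller-side loop: give up the CPU and retry, much as the DRM code
         * drops its own mutex, schedules and retries on -EAGAIN. */
        while (try_lock_all(locks, 2))
                sched_yield();

        pthread_mutex_unlock(&locks[0]);
        pthread_mutex_unlock(&locks[1]);
        return 0;
}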
+void drm_ttm_unlock_mm(drm_ttm_t * ttm)
+{
+ p_mm_entry_t *entry;
+
+ list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ BUG_ON(!entry->locked);
+ up_write(&entry->mm->mmap_sem);
+ entry->locked = 0;
+ }
+}
+
+int drm_ttm_remap_bound(drm_ttm_t *ttm)
+{
+ vma_entry_t *v_entry;
+ int ret = 0;
+
+ list_for_each_entry(v_entry, &ttm->vma_list, head) {
+ ret = drm_ttm_map_bound(v_entry->vma);
+ if (ret)
+ break;
+ }
+
+ drm_ttm_unlock_mm(ttm);
+ return ret;
+}
+
+void drm_ttm_finish_unmap(drm_ttm_t *ttm)
+{
+ vma_entry_t *v_entry;
+
+ if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
+ return;
+
+ list_for_each_entry(v_entry, &ttm->vma_list, head) {
+ v_entry->vma->vm_flags &= ~VM_PFNMAP;
+ }
+ drm_ttm_unlock_mm(ttm);
+}
+
+#endif
+
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 407853d7..a1a94399 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -31,6 +31,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <asm/agp.h>
#ifndef _DRM_COMPAT_H_
#define _DRM_COMPAT_H_
@@ -227,4 +228,152 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
}
#endif
+#include <linux/mm.h>
+#include <asm/page.h>
+
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
+ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
+#define DRM_ODD_MM_COMPAT
+#endif
+
+
+
+/*
+ * Flush relevant caches and clear a VMA structure so that page references
+ * will cause a page fault. Don't flush tlbs.
+ */
+
+extern void drm_clear_vma(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end);
+
+/*
+ * Return the PTE protection map entries for the VMA flags given by
+ * flags. This is a functional interface to the kernel's protection map.
+ */
+
+extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
+
+/*
+ * These are similar to the current kernel gatt pages allocator, only that we
+ * These are similar to the current kernel gatt pages allocator, except that we
+ * that are not in the kernel linear map.
+ */
+
+#define drm_alloc_gatt_pages(order) ({ \
+ void *_virt = alloc_gatt_pages(order); \
+ ((_virt) ? virt_to_page(_virt) : NULL);})
+#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order)
+
+#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These are too slow in earlier kernels.
+ */
+
+extern int drm_unmap_page_from_agp(struct page *page);
+extern int drm_map_page_into_agp(struct page *page);
+
+#define map_page_into_agp drm_map_page_into_agp
+#define unmap_page_from_agp drm_unmap_page_from_agp
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+extern struct page *get_nopage_retry(void);
+extern void free_nopage_retry(void);
+struct fault_data;
+extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
+ struct fault_data *data);
+
+#define NOPAGE_REFAULT get_nopage_retry()
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+
+/*
+ * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
+ * For now, just return a dummy page that we've allocated out of
+ * static space. The page will be put by do_nopage() since we've already
+ * filled out the pte.
+ */
+
+struct fault_data {
+ struct vm_area_struct *vma;
+ unsigned long address;
+ pgoff_t pgoff;
+ unsigned int flags;
+
+ int type;
+};
+
+
+extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot);
+
+extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type);
+
+#endif
+
+#ifdef DRM_ODD_MM_COMPAT
+
+struct drm_ttm;
+
+
+/*
+ * Add a vma to the ttm vma list, and the
+ * process mm pointer to the ttm mm list. Needs the ttm mutex.
+ */
+
+extern int drm_ttm_add_vma(struct drm_ttm * ttm,
+ struct vm_area_struct *vma);
+/*
+ * Delete a vma and the corresponding mm pointer from the
+ * ttm lists. Needs the ttm mutex.
+ */
+extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
+ struct vm_area_struct *vma);
+
+/*
+ * Attempts to lock all relevant mmap_sems for a ttm, while
+ * not releasing the ttm mutex. May return -EAGAIN to avoid
+ * deadlocks. In that case the caller shall release the ttm mutex,
+ * schedule() and try again.
+ */
+
+extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
+
+/*
+ * Unlock all relevant mmap_sems for a ttm.
+ */
+extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
+
+/*
+ * If the ttm was bound to the aperture, this function shall be called
+ * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
+ * vmas mapping this ttm. This is needed just after unmapping the ptes of
+ * the vma, otherwise the do_nopage() function will bug :(. The function
+ * releases the mmap_sems for this ttm.
+ */
+
+extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
+
+/*
+ * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
+ * fault these pfns in, because the first one will set the vma VM_PFNMAP
+ * flag, which will make the next fault bug in do_nopage(). The function
+ * releases the mmap_sems for this ttm.
+ */
+
+extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
+
+
+/*
+ * Remap a vma for a bound ttm. Call with the ttm mutex held and
+ * the relevant mmap_sem locked.
+ */
+extern int drm_ttm_map_bound(struct vm_area_struct *vma);
+
+#endif
#endif
diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c
index 95581b53..49042272 100644
--- a/linux-core/drm_context.c
+++ b/linux-core/drm_context.c
@@ -53,7 +53,7 @@
* \param ctx_handle context handle.
*
* Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::context_sareas, while holding the drm_device::struct_sem
+ * in drm_device::context_sareas, while holding the drm_device::struct_mutex
* lock.
*/
void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
@@ -83,7 +83,7 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
*
* Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
* drm_device::context_sareas to accommodate the new entry while holding the
- * drm_device::struct_sem lock.
+ * drm_device::struct_mutex lock.
*/
static int drm_ctxbitmap_next(drm_device_t * dev)
{
@@ -145,7 +145,7 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
* \param dev DRM device.
*
* Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_sem lock.
+ * the drm_device::struct_mutex lock.
*/
int drm_ctxbitmap_init(drm_device_t * dev)
{
@@ -178,7 +178,7 @@ int drm_ctxbitmap_init(drm_device_t * dev)
* \param dev DRM device.
*
* Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_sem lock.
+ * the drm_device::struct_mutex lock.
*/
void drm_ctxbitmap_cleanup(drm_device_t * dev)
{
diff --git a/linux-core/drm_core.h b/linux-core/drm_core.h
index f5405fdf..705bbff7 100644
--- a/linux-core/drm_core.h
+++ b/linux-core/drm_core.h
@@ -25,11 +25,11 @@
#define CORE_NAME "drm"
#define CORE_DESC "DRM shared core routines"
-#define CORE_DATE "20051102"
+#define CORE_DATE "20060810"
#define DRM_IF_MAJOR 1
-#define DRM_IF_MINOR 2
+#define DRM_IF_MINOR 3
#define CORE_MAJOR 1
-#define CORE_MINOR 0
-#define CORE_PATCHLEVEL 1
+#define CORE_MINOR 1
+#define CORE_PATCHLEVEL 0
diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c
deleted file mode 100644
index 7857453c..00000000
--- a/linux-core/drm_drawable.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/** No-op. */
-int drm_adddraw(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- drm_draw_t draw;
-
- draw.handle = 0; /* NOOP */
- DRM_DEBUG("%d\n", draw.handle);
- if (copy_to_user((drm_draw_t __user *) arg, &draw, sizeof(draw)))
- return -EFAULT;
- return 0;
-}
-
-/** No-op. */
-int drm_rmdraw(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- return 0; /* NOOP */
-}
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 9712170b..518e2aa3 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -50,7 +50,7 @@
#include "drmP.h"
#include "drm_core.h"
-static void __exit drm_cleanup(drm_device_t * dev);
+static void drm_cleanup(drm_device_t * dev);
int drm_fb_loaded = 0;
static int drm_version(struct inode *inode, struct file *filp,
@@ -119,9 +119,16 @@ static drm_ioctl_desc_t drm_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
+ [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl,
+ DRM_AUTH },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
};
-#define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls )
+#define DRIVER_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+
/**
* Take down the DRM device.
@@ -141,6 +148,11 @@ int drm_lastclose(drm_device_t * dev)
DRM_DEBUG("\n");
+ if (drm_bo_driver_finish(dev)) {
+ DRM_ERROR("DRM memory manager still busy. "
+ "System is unstable. Please reboot.\n");
+ }
+
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
@@ -154,6 +166,18 @@ int drm_lastclose(drm_device_t * dev)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
+ /* Free drawable information memory */
+ for (i = 0; i < dev->drw_bitfield_length / sizeof(*dev->drw_bitfield);
+ i++) {
+ drm_drawable_info_t *info = drm_get_drawable_info(dev, i);
+
+ if (info) {
+ drm_free(info->rects, info->num_rects *
+ sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+ drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+ }
+ }
+
mutex_lock(&dev->struct_mutex);
del_timer(&dev->timer);
@@ -204,7 +228,7 @@ int drm_lastclose(drm_device_t * dev)
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
- drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
@@ -242,6 +266,7 @@ int drm_lastclose(drm_device_t * dev)
dev->lock.filp = NULL;
wake_up_interruptible(&dev->lock.lock_queue);
}
+ dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("lastclose completed\n");
@@ -336,7 +361,7 @@ EXPORT_SYMBOL(drm_init);
*
* \sa drm_init
*/
-static void __exit drm_cleanup(drm_device_t * dev)
+static void drm_cleanup(drm_device_t * dev)
{
DRM_DEBUG("\n");
@@ -346,11 +371,14 @@ static void __exit drm_cleanup(drm_device_t * dev)
}
drm_lastclose(dev);
+ drm_fence_manager_takedown(dev);
if (dev->maplist) {
drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
dev->maplist = NULL;
drm_ht_remove(&dev->map_hash);
+ drm_mm_takedown(&dev->offset_manager);
+ drm_ht_remove(&dev->object_hash);
}
if (!drm_fb_loaded)
@@ -379,7 +407,7 @@ static void __exit drm_cleanup(drm_device_t * dev)
DRM_ERROR("Cannot unload module\n");
}
-void __exit drm_exit(struct drm_driver *driver)
+void drm_exit(struct drm_driver *driver)
{
int i;
drm_device_t *dev = NULL;
@@ -405,6 +433,9 @@ void __exit drm_exit(struct drm_driver *driver)
}
} else
pci_unregister_driver(&driver->pci_driver);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+ free_nopage_retry();
+#endif
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_exit);
@@ -415,10 +446,64 @@ static struct file_operations drm_stub_fops = {
.open = drm_stub_open
};
+static int drm_create_memory_caches(void)
+{
+ drm_cache.mm = kmem_cache_create("drm_mm_node_t",
+ sizeof(drm_mm_node_t),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL,NULL);
+ if (!drm_cache.mm)
+ return -ENOMEM;
+
+ drm_cache.fence_object= kmem_cache_create("drm_fence_object_t",
+ sizeof(drm_fence_object_t),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL,NULL);
+ if (!drm_cache.fence_object)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void drm_free_mem_cache(kmem_cache_t *cache,
+ const char *name)
+{
+ if (!cache)
+ return;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+ if (kmem_cache_destroy(cache)) {
+ DRM_ERROR("Warning! DRM is leaking %s memory.\n",
+ name);
+ }
+#else
+ kmem_cache_destroy(cache);
+#endif
+}
+
+static void drm_free_memory_caches(void )
+{
+
+ drm_free_mem_cache(drm_cache.fence_object, "fence object");
+ drm_cache.fence_object = NULL;
+ drm_free_mem_cache(drm_cache.mm, "memory manager block");
+ drm_cache.mm = NULL;
+}
+
+
static int __init drm_core_init(void)
{
- int ret = -ENOMEM;
+ int ret;
+ struct sysinfo si;
+
+ si_meminfo(&si);
+ drm_init_memctl(si.totalram/2, si.totalram*3/4);
+ ret = drm_create_memory_caches();
+ if (ret)
+ goto err_p1;
+ ret = -ENOMEM;
drm_cards_limit =
(drm_cards_limit < DRM_MAX_MINOR + 1 ? drm_cards_limit : DRM_MAX_MINOR + 1);
drm_heads = drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB);
@@ -454,11 +539,13 @@ err_p2:
unregister_chrdev(DRM_MAJOR, "drm");
drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
err_p1:
+ drm_free_memory_caches();
return ret;
}
static void __exit drm_core_exit(void)
{
+ drm_free_memory_caches();
remove_proc_entry("dri", NULL);
drm_sysfs_destroy(drm_class);
@@ -535,14 +622,19 @@ int drm_ioctl(struct inode *inode, struct file *filp,
current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),
priv->authenticated);
- if (nr < DRIVER_IOCTL_COUNT)
+ if (nr >= DRIVER_IOCTL_COUNT &&
+ (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END))
+ goto err_i1;
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
+ && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ else if (nr >= DRM_COMMAND_END || nr < DRM_COMMAND_BASE)
ioctl = &drm_ioctls[nr];
- else if ((nr >= DRM_COMMAND_BASE)
- && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- else
+ else
goto err_i1;
+
+
func = ioctl->func;
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) /* Local override? */
func = dev->driver->dma_ioctl;
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
new file mode 100644
index 00000000..f656340e
--- /dev/null
+++ b/linux-core/drm_fence.c
@@ -0,0 +1,619 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+/*
+ * Typically called by the IRQ handler.
+ */
+
+void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
+{
+ int wake = 0;
+ uint32_t diff;
+ uint32_t relevant;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ struct list_head *list, *prev;
+ drm_fence_object_t *fence;
+ int found = 0;
+
+ if (list_empty(&fm->ring))
+ return;
+
+ list_for_each_entry(fence, &fm->ring, ring) {
+ diff = (sequence - fence->sequence) & driver->sequence_mask;
+ if (diff > driver->wrap_diff) {
+ found = 1;
+ break;
+ }
+ }
+
+ list = (found) ? fence->ring.prev : fm->ring.prev;
+ prev = list->prev;
+
+ for (; list != &fm->ring; list = prev, prev = list->prev) {
+ fence = list_entry(list, drm_fence_object_t, ring);
+
+ type |= fence->native_type;
+ relevant = type & fence->type;
+
+ if ((fence->signaled | relevant) != fence->signaled) {
+ fence->signaled |= relevant;
+ DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
+ fence->base.hash.key, fence->signaled);
+ fence->submitted_flush |= relevant;
+ wake = 1;
+ }
+
+ relevant = fence->flush_mask &
+ ~(fence->signaled | fence->submitted_flush);
+
+ if (relevant) {
+ fm->pending_flush |= relevant;
+ fence->submitted_flush = fence->flush_mask;
+ }
+
+ if (!(fence->type & ~fence->signaled)) {
+ DRM_DEBUG("Fence completely signaled 0x%08lx\n",
+ fence->base.hash.key);
+ list_del_init(&fence->ring);
+ }
+
+ }
+
+ if (wake) {
+ DRM_WAKEUP(&fm->fence_queue);
+ }
+}
+
+EXPORT_SYMBOL(drm_fence_handler);
+
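drm_fence_handler() above decides whether a fence sequence has been reached using masked subtraction, so the test stays correct when the hardware counter wraps. A stand-alone sketch of that test (not part of the patch); the 24-bit mask and the half-range wrap window are invented for illustration, the real values come from the fence driver:

#include <stdio.h>
#include <stdint.h>

#define SEQ_MASK  0x00ffffffu           /* hypothetical 24-bit hardware counter */
#define WRAP_DIFF (SEQ_MASK >> 1)       /* hypothetical wrap window */

/* Has the hardware counter reached this fence's sequence number? */
static int sequence_passed(uint32_t hw_seq, uint32_t fence_seq)
{
        uint32_t diff = (hw_seq - fence_seq) & SEQ_MASK;

        /* A small forward difference means "passed"; a huge one means the
         * fence was emitted after hw_seq, i.e. it is still pending. */
        return diff <= WRAP_DIFF;
}

int main(void)
{
        printf("%d\n", sequence_passed(0x000010, 0xfffff0));    /* 1: passed across the wrap */
        printf("%d\n", sequence_passed(0x000010, 0x000100));    /* 0: still pending */
        return 0;
}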
+static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ unsigned long flags;
+
+ write_lock_irqsave(&fm->lock, flags);
+ list_del_init(ring);
+ write_unlock_irqrestore(&fm->lock, flags);
+}
+
+void drm_fence_usage_deref_locked(drm_device_t * dev,
+ drm_fence_object_t * fence)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+
+ if (atomic_dec_and_test(&fence->usage)) {
+ drm_fence_unring(dev, &fence->ring);
+ DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
+ fence->base.hash.key);
+ atomic_dec(&fm->count);
+ drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence),
+ fence);
+ }
+}
+
+void drm_fence_usage_deref_unlocked(drm_device_t * dev,
+ drm_fence_object_t * fence)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+
+ if (atomic_dec_and_test(&fence->usage)) {
+ mutex_lock(&dev->struct_mutex);
+ if (atomic_read(&fence->usage) == 0) {
+ drm_fence_unring(dev, &fence->ring);
+ atomic_dec(&fm->count);
+ drm_ctl_cache_free(drm_cache.fence_object,
+ sizeof(*fence), fence);
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+static void drm_fence_object_destroy(drm_file_t * priv,
+ drm_user_object_t * base)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_fence_object_t *fence =
+ drm_user_object_entry(base, drm_fence_object_t, base);
+
+ drm_fence_usage_deref_locked(dev, fence);
+}
+
+static int fence_signaled(drm_device_t * dev, volatile
+ drm_fence_object_t * fence,
+ uint32_t mask, int poke_flush)
+{
+ unsigned long flags;
+ int signaled;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+
+ if (poke_flush)
+ driver->poke_flush(dev);
+ read_lock_irqsave(&fm->lock, flags);
+ signaled =
+ (fence->type & mask & fence->signaled) == (fence->type & mask);
+ read_unlock_irqrestore(&fm->lock, flags);
+
+ return signaled;
+}
+
+static void drm_fence_flush_exe(drm_fence_manager_t * fm,
+ drm_fence_driver_t * driver, uint32_t sequence)
+{
+ uint32_t diff;
+
+ if (!fm->pending_exe_flush) {
+ volatile struct list_head *list;
+
+ /*
+ * Last_exe_flush is invalid. Find oldest sequence.
+ */
+
+/* list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
+ list = &fm->ring;
+ if (list->next == &fm->ring) {
+ return;
+ } else {
+ drm_fence_object_t *fence =
+ list_entry(list->next, drm_fence_object_t, ring);
+ fm->last_exe_flush = (fence->sequence - 1) &
+ driver->sequence_mask;
+ }
+ diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
+ if (diff >= driver->wrap_diff)
+ return;
+ fm->exe_flush_sequence = sequence;
+ fm->pending_exe_flush = 1;
+ } else {
+ diff =
+ (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
+ if (diff < driver->wrap_diff) {
+ fm->exe_flush_sequence = sequence;
+ }
+ }
+}
+
+int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
+ uint32_t type)
+{
+ return ((fence->signaled & type) == type);
+}
+
+int drm_fence_object_flush(drm_device_t * dev,
+ volatile drm_fence_object_t * fence, uint32_t type)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ unsigned long flags;
+
+ if (type & ~fence->type) {
+ DRM_ERROR("Flush trying to extend fence type, "
+ "0x%x, 0x%x\n", type, fence->type);
+ return -EINVAL;
+ }
+
+ write_lock_irqsave(&fm->lock, flags);
+ fence->flush_mask |= type;
+ if (fence->submitted_flush == fence->signaled) {
+ if ((fence->type & DRM_FENCE_TYPE_EXE) &&
+ !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
+ drm_fence_flush_exe(fm, driver, fence->sequence);
+ fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
+ } else {
+ fm->pending_flush |= (fence->flush_mask &
+ ~fence->submitted_flush);
+ fence->submitted_flush = fence->flush_mask;
+ }
+ }
+ write_unlock_irqrestore(&fm->lock, flags);
+ driver->poke_flush(dev);
+ return 0;
+}
+
+/*
+ * Make sure old fence objects are signaled before their fence sequences are
+ * wrapped around and reused.
+ */
+
+void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ uint32_t old_sequence;
+ unsigned long flags;
+ drm_fence_object_t *fence;
+ uint32_t diff;
+
+ mutex_lock(&dev->struct_mutex);
+ read_lock_irqsave(&fm->lock, flags);
+ if (fm->ring.next == &fm->ring) {
+ read_unlock_irqrestore(&fm->lock, flags);
+ mutex_unlock(&dev->struct_mutex);
+ return;
+ }
+ old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
+ fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
+ atomic_inc(&fence->usage);
+ mutex_unlock(&dev->struct_mutex);
+ diff = (old_sequence - fence->sequence) & driver->sequence_mask;
+ read_unlock_irqrestore(&fm->lock, flags);
+ if (diff < driver->wrap_diff) {
+ drm_fence_object_flush(dev, fence, fence->type);
+ }
+ drm_fence_usage_deref_unlocked(dev, fence);
+}
+
+EXPORT_SYMBOL(drm_fence_flush_old);
+
+int drm_fence_object_wait(drm_device_t * dev,
+ volatile drm_fence_object_t * fence,
+ int lazy, int ignore_signals, uint32_t mask)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ int ret = 0;
+ unsigned long _end;
+ int signaled;
+
+ if (mask & ~fence->type) {
+ DRM_ERROR("Wait trying to extend fence type"
+ " 0x%08x 0x%08x\n", mask, fence->type);
+ return -EINVAL;
+ }
+
+ if (fence_signaled(dev, fence, mask, 0))
+ return 0;
+
+ _end = jiffies + 3 * DRM_HZ;
+
+ drm_fence_object_flush(dev, fence, mask);
+
+ if (lazy && driver->lazy_capable) {
+
+ do {
+ DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
+ fence_signaled(dev, fence, mask, 1));
+ if (time_after_eq(jiffies, _end))
+ break;
+ } while (ret == -EINTR && ignore_signals);
+ if (time_after_eq(jiffies, _end) && (ret != 0))
+ ret = -EBUSY;
+ if (ret) {
+ if (ret == -EBUSY) {
+ DRM_ERROR("Fence timeout. "
+ "GPU lockup or fence driver was "
+ "taken down.\n");
+ }
+ return ((ret == -EINTR) ? -EAGAIN : ret);
+ }
+ } else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
+ driver->lazy_capable) {
+
+		/*
+		 * Use an IRQ wait for the EXE fence type when the driver
+		 * supports it, to save CPU time in some cases.
+		 */
+
+ do {
+ DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
+ fence_signaled(dev, fence,
+ DRM_FENCE_TYPE_EXE, 1));
+ if (time_after_eq(jiffies, _end))
+ break;
+ } while (ret == -EINTR && ignore_signals);
+ if (time_after_eq(jiffies, _end) && (ret != 0))
+ ret = -EBUSY;
+ if (ret)
+ return ((ret == -EINTR) ? -EAGAIN : ret);
+ }
+
+ if (fence_signaled(dev, fence, mask, 0))
+ return 0;
+
+ /*
+ * Avoid kernel-space busy-waits.
+ */
+#if 1
+ if (!ignore_signals)
+ return -EAGAIN;
+#endif
+ do {
+ schedule();
+ signaled = fence_signaled(dev, fence, mask, 1);
+ } while (!signaled && !time_after_eq(jiffies, _end));
+
+ if (!signaled)
+ return -EBUSY;
+
+ return 0;
+}
+
+int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
+ uint32_t fence_flags, uint32_t type)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ unsigned long flags;
+ uint32_t sequence;
+ uint32_t native_type;
+ int ret;
+
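+	/*
+	 * Take the fence off the ring, let the driver emit a new hardware
+	 * sequence, then reset its flush/signaled state and re-add it at the
+	 * ring tail under the fence manager lock.
+	 */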
+ drm_fence_unring(dev, &fence->ring);
+ ret = driver->emit(dev, fence_flags, &sequence, &native_type);
+ if (ret)
+ return ret;
+
+ write_lock_irqsave(&fm->lock, flags);
+ fence->type = type;
+ fence->flush_mask = 0x00;
+ fence->submitted_flush = 0x00;
+ fence->signaled = 0x00;
+ fence->sequence = sequence;
+ fence->native_type = native_type;
+ list_add_tail(&fence->ring, &fm->ring);
+ write_unlock_irqrestore(&fm->lock, flags);
+ return 0;
+}
+
+static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
+ uint32_t fence_flags,
+ drm_fence_object_t * fence)
+{
+ int ret = 0;
+ unsigned long flags;
+ drm_fence_manager_t *fm = &dev->fm;
+
+ mutex_lock(&dev->struct_mutex);
+ atomic_set(&fence->usage, 1);
+ mutex_unlock(&dev->struct_mutex);
+
+ write_lock_irqsave(&fm->lock, flags);
+ INIT_LIST_HEAD(&fence->ring);
+ fence->class = 0;
+ fence->type = type;
+ fence->flush_mask = 0;
+ fence->submitted_flush = 0;
+ fence->signaled = 0;
+ fence->sequence = 0;
+ write_unlock_irqrestore(&fm->lock, flags);
+ if (fence_flags & DRM_FENCE_FLAG_EMIT) {
+ ret = drm_fence_object_emit(dev, fence, fence_flags, type);
+ }
+ return ret;
+}
+
+int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
+ int shareable)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_add_user_object(priv, &fence->base, shareable);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ fence->base.type = drm_fence_type;
+ fence->base.remove = &drm_fence_object_destroy;
+ DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
+ return 0;
+}
+
+EXPORT_SYMBOL(drm_fence_add_user_object);
+
+int drm_fence_object_create(drm_device_t * dev, uint32_t type,
+ unsigned flags, drm_fence_object_t ** c_fence)
+{
+ drm_fence_object_t *fence;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+
+ fence = drm_ctl_cache_alloc(drm_cache.fence_object,
+ sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return -ENOMEM;
+ ret = drm_fence_object_init(dev, type, flags, fence);
+ if (ret) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ return ret;
+ }
+ *c_fence = fence;
+ atomic_inc(&fm->count);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(drm_fence_object_create);
+
+void drm_fence_manager_init(drm_device_t * dev)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *fed = dev->driver->fence_driver;
+ int i;
+
+ fm->lock = RW_LOCK_UNLOCKED;
+ write_lock(&fm->lock);
+ INIT_LIST_HEAD(&fm->ring);
+ fm->pending_flush = 0;
+ DRM_INIT_WAITQUEUE(&fm->fence_queue);
+ fm->initialized = 0;
+ if (fed) {
+ fm->initialized = 1;
+ atomic_set(&fm->count, 0);
+ for (i = 0; i < fed->no_types; ++i) {
+ fm->fence_types[i] = &fm->ring;
+ }
+ }
+ write_unlock(&fm->lock);
+}
+
+void drm_fence_manager_takedown(drm_device_t * dev)
+{
+}
+
+drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_user_object_t *uo;
+ drm_fence_object_t *fence;
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, handle);
+ if (!uo || (uo->type != drm_fence_type)) {
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+ }
+ fence = drm_user_object_entry(uo, drm_fence_object_t, base);
+ atomic_inc(&fence->usage);
+ mutex_unlock(&dev->struct_mutex);
+ return fence;
+}
+
+int drm_fence_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ int ret;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_arg_t arg;
+ drm_fence_object_t *fence;
+ drm_user_object_t *uo;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+ switch (arg.op) {
+ case drm_fence_create:
+ if (arg.flags & DRM_FENCE_FLAG_EMIT)
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
+ if (ret)
+ return ret;
+ ret = drm_fence_add_user_object(priv, fence,
+ arg.flags &
+ DRM_FENCE_FLAG_SHAREABLE);
+ if (ret) {
+ drm_fence_usage_deref_unlocked(dev, fence);
+ return ret;
+ }
+
+ /*
+		 * usage > 0, so there is no need to lock dev->struct_mutex.
+ */
+
+ atomic_inc(&fence->usage);
+ arg.handle = fence->base.hash.key;
+ break;
+ case drm_fence_destroy:
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, arg.handle);
+ if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+ ret = drm_remove_user_object(priv, uo);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ case drm_fence_reference:
+ ret =
+ drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
+ if (ret)
+ return ret;
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ break;
+ case drm_fence_unreference:
+ ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
+ return ret;
+ case drm_fence_signaled:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ break;
+ case drm_fence_flush:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_flush(dev, fence, arg.type);
+ break;
+ case drm_fence_wait:
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret =
+ drm_fence_object_wait(dev, fence,
+ arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
+ 0, arg.type);
+ break;
+ case drm_fence_emit:
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ fence = drm_lookup_fence_object(priv, arg.handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
+ break;
+ case drm_fence_buffers:
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized\n");
+ return -EINVAL;
+ }
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
+ NULL, &fence);
+ if (ret)
+ return ret;
+ ret = drm_fence_add_user_object(priv, fence,
+ arg.flags &
+ DRM_FENCE_FLAG_SHAREABLE);
+ if (ret)
+ return ret;
+ atomic_inc(&fence->usage);
+ arg.handle = fence->base.hash.key;
+ break;
+ default:
+ return -EINVAL;
+ }
+ read_lock_irqsave(&fm->lock, flags);
+ arg.class = fence->class;
+ arg.type = fence->type;
+ arg.signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(dev, fence);
+
+ DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
+ return ret;
+}
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 691edff9..b60ced34 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -47,6 +47,7 @@ static int drm_setup(drm_device_t * dev)
int i;
int ret;
+
if (dev->driver->firstopen) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
@@ -56,6 +57,7 @@ static int drm_setup(drm_device_t * dev)
dev->magicfree.next = NULL;
/* prebuild the SAREA */
+
i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
if (i != 0)
return i;
@@ -71,11 +73,11 @@ static int drm_setup(drm_device_t * dev)
return i;
}
- for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
+ for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
atomic_set(&dev->counts[i], 0);
- drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
- INIT_LIST_HEAD(&dev->magicfree);
+ drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
+ INIT_LIST_HEAD(&dev->magicfree);
dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
if (dev->ctxlist == NULL)
@@ -156,6 +158,12 @@ int drm_open(struct inode *inode, struct file *filp)
}
spin_unlock(&dev->count_lock);
}
+ mutex_lock(&dev->struct_mutex);
+ BUG_ON((dev->dev_mapping != NULL) &&
+ (dev->dev_mapping != inode->i_mapping));
+ if (dev->dev_mapping == NULL)
+ dev->dev_mapping = inode->i_mapping;
+ mutex_unlock(&dev->struct_mutex);
return retcode;
}
@@ -175,7 +183,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
drm_device_t *dev = NULL;
int minor = iminor(inode);
int err = -ENODEV;
- struct file_operations *old_fops;
+ const struct file_operations *old_fops;
DRM_DEBUG("\n");
@@ -233,6 +241,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
int minor = iminor(inode);
drm_file_t *priv;
int ret;
+ int i,j;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
@@ -256,6 +265,22 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
+ INIT_LIST_HEAD(&priv->user_objects);
+ INIT_LIST_HEAD(&priv->refd_objects);
+
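+	/*
+	 * Create one reference-object hash table per reference type for this
+	 * file. If any of them fails, tear down the ones already created.
+	 */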
+ for (i=0; i<_DRM_NO_REF_TYPES; ++i) {
+ ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ for(j=0; j<i; ++j) {
+ drm_ht_remove(&priv->refd_object_hash[j]);
+ }
+ goto out_free;
+ }
+
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -320,6 +345,53 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
+static void drm_object_release(struct file *filp) {
+
+ drm_file_t *priv = filp->private_data;
+ struct list_head *head;
+ drm_user_object_t *user_object;
+ drm_ref_object_t *ref_object;
+ int i;
+
+ /*
+ * Free leftover ref objects created by me. Note that we cannot use
+ * list_for_each() here, as the struct_mutex may be temporarily released
+ * by the remove_() functions, and thus the lists may be altered.
+	 * Also, drm_remove_ref_object() will not remove the object
+	 * from the list unless its refcount is 1.
+ */
+
+ head = &priv->refd_objects;
+ while (head->next != head) {
+ ref_object = list_entry(head->next, drm_ref_object_t, list);
+ drm_remove_ref_object(priv, ref_object);
+ head = &priv->refd_objects;
+ }
+
+ /*
+ * Free leftover user objects created by me.
+ */
+
+ head = &priv->user_objects;
+ while (head->next != head) {
+ user_object = list_entry(head->next, drm_user_object_t, list);
+ drm_remove_user_object(priv, user_object);
+ head = &priv->user_objects;
+ }
+
+ for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
+ drm_ht_remove(&priv->refd_object_hash[i]);
+ }
+}
+
/**
* Release file.
*
@@ -354,58 +426,43 @@ int drm_release(struct inode *inode, struct file *filp)
current->pid, (long)old_encode_dev(priv->head->device),
dev->open_count);
- if (priv->lock_count && dev->lock.hw_lock &&
- _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.filp == filp) {
- DRM_DEBUG("File %p released, freeing lock for context %d\n",
- filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
-
- if (dev->driver->reclaim_buffers_locked)
- dev->driver->reclaim_buffers_locked(dev, filp);
+ if (dev->driver->reclaim_buffers_locked) {
+ unsigned long _end = jiffies + DRM_HZ*3;
- drm_lock_free(dev, &dev->lock.hw_lock->lock,
- _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ do {
+ retcode = drm_kernel_take_hw_lock(filp);
+ } while(retcode && !time_after_eq(jiffies,_end));
- /* FIXME: may require heavy-handed reset of
- hardware at this point, possibly
- processed via a callback to the X
- server. */
- } else if (dev->driver->reclaim_buffers_locked && priv->lock_count
- && dev->lock.hw_lock) {
- /* The lock is required to reclaim buffers */
- DECLARE_WAITQUEUE(entry, current);
-
- add_wait_queue(&dev->lock.lock_queue, &entry);
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (!dev->lock.hw_lock) {
- /* Device has been unregistered */
- retcode = -EINTR;
- break;
- }
- if (drm_lock_take(&dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- dev->lock.filp = filp;
- dev->lock.lock_time = jiffies;
- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
- break; /* Got lock */
- }
- /* Contention */
- schedule();
- if (signal_pending(current)) {
- retcode = -ERESTARTSYS;
- break;
- }
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev->lock.lock_queue, &entry);
if (!retcode) {
dev->driver->reclaim_buffers_locked(dev, filp);
+
drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT);
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ } else {
+
+ /*
+ * FIXME: This is not a good solution. We should perhaps associate the
+ * DRM lock with a process context, and check whether the current process
+ * holds the lock. Then we can run reclaim buffers locked anyway.
+ */
+
+ DRM_ERROR("Reclaim buffers locked deadlock.\n");
+			DRM_ERROR("This is probably a single thread with multiple\n");
+			DRM_ERROR("DRM file descriptors open, either dying or "
+				  "closing file descriptors\n");
+			DRM_ERROR("while holding the lock. Not reclaiming buffers.\n");
+ DRM_ERROR("Locking context is 0x%08x\n",
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
}
+ } else if (drm_i_have_hw_lock(filp)) {
+ DRM_DEBUG("File %p released, freeing lock for context %d\n",
+ filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+ drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
}
+
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked) {
dev->driver->reclaim_buffers(dev, filp);
@@ -414,6 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
+
if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
drm_ctx_list_t *pos, *n;
@@ -435,6 +493,7 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_unlock(&dev->ctxlist_mutex);
mutex_lock(&dev->struct_mutex);
+ drm_object_release(filp);
if (priv->remove_auth_on_close == 1) {
drm_file_t *temp = dev->file_first;
while (temp) {
diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c
index 48061139..6f17e114 100644
--- a/linux-core/drm_hashtab.c
+++ b/linux-core/drm_hashtab.c
@@ -36,25 +36,34 @@
#include "drm_hashtab.h"
#include <linux/hash.h>
-int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
+int drm_ht_create(drm_open_hash_t * ht, unsigned int order)
{
unsigned int i;
ht->size = 1 << order;
ht->order = order;
ht->fill = 0;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
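+	/*
+	 * Tables that fit within a page are allocated with drm_calloc();
+	 * larger tables, or kmalloc failures, fall back to vmalloc(). The
+	 * choice is recorded in use_vmalloc so drm_ht_remove() can free the
+	 * table with the matching function.
+	 */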
+ ht->table = NULL;
+ ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+ if (!ht->use_vmalloc) {
+ ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+ DRM_MEM_HASHTAB);
+ }
+ if (!ht->table) {
+ ht->use_vmalloc = 1;
+ ht->table = vmalloc(ht->size * sizeof(*ht->table));
+ }
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
- for (i=0; i< ht->size; ++i) {
+ for (i = 0; i < ht->size; ++i) {
INIT_HLIST_HEAD(&ht->table[i]);
}
return 0;
}
-void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
+void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key)
{
drm_hash_item_t *entry;
struct hlist_head *h_list;
@@ -71,7 +80,7 @@ void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
}
}
-static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
+static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,
unsigned long key)
{
drm_hash_item_t *entry;
@@ -91,8 +100,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
return NULL;
}
-
-int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)
{
drm_hash_item_t *entry;
struct hlist_head *h_list;
@@ -106,7 +114,7 @@ int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
hlist_for_each(list, h_list) {
entry = hlist_entry(list, drm_hash_item_t, head);
if (entry->key == key)
- return -1;
+ return -EINVAL;
if (entry->key > key)
break;
parent = list;
@@ -123,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
-int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
+int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
@@ -138,7 +146,7 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
ret = drm_ht_insert_item(ht, item);
if (ret)
unshifted_key = (unshifted_key + 1) & mask;
- } while(ret && (unshifted_key != first));
+ } while (ret && (unshifted_key != first));
if (ret) {
DRM_ERROR("Available key bit space exhausted\n");
@@ -147,20 +155,20 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
return 0;
}
-int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
- drm_hash_item_t **item)
+int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key,
+ drm_hash_item_t ** item)
{
struct hlist_node *list;
list = drm_ht_find_key(ht, key);
if (!list)
- return -1;
+ return -EINVAL;
*item = hlist_entry(list, drm_hash_item_t, head);
return 0;
}
-int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
+int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key)
{
struct hlist_node *list;
@@ -170,21 +178,24 @@ int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
ht->fill--;
return 0;
}
- return -1;
+ return -EINVAL;
}
-int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item)
{
hlist_del_init(&item->head);
ht->fill--;
return 0;
}
-void drm_ht_remove(drm_open_hash_t *ht)
+void drm_ht_remove(drm_open_hash_t * ht)
{
if (ht->table) {
- vfree(ht->table);
+ if (ht->use_vmalloc)
+ vfree(ht->table);
+ else
+ drm_free(ht->table, ht->size * sizeof(*ht->table),
+ DRM_MEM_HASHTAB);
ht->table = NULL;
}
}
-
diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h
index 40afec05..613091c9 100644
--- a/linux-core/drm_hashtab.h
+++ b/linux-core/drm_hashtab.h
@@ -47,6 +47,7 @@ typedef struct drm_open_hash{
unsigned int order;
unsigned int fill;
struct hlist_head *table;
+ int use_vmalloc;
} drm_open_hash_t;
diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c
index 54024e1b..776f462e 100644
--- a/linux-core/drm_ioctl.c
+++ b/linux-core/drm_ioctl.c
@@ -125,9 +125,10 @@ int drm_setunique(struct inode *inode, struct file *filp,
domain = bus >> 8;
bus &= 0xff;
- if ((domain != dev->pci_domain) ||
- (bus != dev->pci_bus) ||
- (slot != dev->pci_slot) || (func != dev->pci_func))
+ if ((domain != drm_get_pci_domain(dev)) ||
+ (bus != dev->pdev->bus->number) ||
+ (slot != PCI_SLOT(dev->pdev->devfn)) ||
+ (func != PCI_FUNC(dev->pdev->devfn)))
return -EINVAL;
return 0;
@@ -145,7 +146,10 @@ static int drm_set_busid(drm_device_t * dev)
return ENOMEM;
len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
- dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
+ PCI_SLOT(dev->pdev->devfn),
+ PCI_FUNC(dev->pdev->devfn));
if (len > dev->unique_len)
DRM_ERROR("buffer overflow");
@@ -238,7 +242,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
- drm_client_t __user *argp = (void __user *)arg;
+ drm_client_t __user *argp = (drm_client_t __user *)arg;
drm_client_t client;
drm_file_t *pt;
int idx;
@@ -325,21 +329,23 @@ int drm_setversion(DRM_IOCTL_ARGS)
int if_version;
drm_set_version_t __user *argp = (void __user *)data;
- DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv));
+ if (copy_from_user(&sv, argp, sizeof(sv)))
+ return -EFAULT;
retv.drm_di_major = DRM_IF_MAJOR;
retv.drm_di_minor = DRM_IF_MINOR;
retv.drm_dd_major = dev->driver->major;
retv.drm_dd_minor = dev->driver->minor;
- DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv));
+ if (copy_to_user(argp, &retv, sizeof(sv)))
+ return -EFAULT;
if (sv.drm_di_major != -1) {
if (sv.drm_di_major != DRM_IF_MAJOR ||
sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
return EINVAL;
if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
- dev->if_version = DRM_MAX(if_version, dev->if_version);
+ dev->if_version = max(if_version, dev->if_version);
if (sv.drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index d76fd51d..c365c08e 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -64,9 +64,9 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp,
if (copy_from_user(&p, argp, sizeof(p)))
return -EFAULT;
- if ((p.busnum >> 8) != dev->pci_domain ||
- (p.busnum & 0xff) != dev->pci_bus ||
- p.devnum != dev->pci_slot || p.funcnum != dev->pci_func)
+ if ((p.busnum >> 8) != drm_get_pci_domain(dev) ||
+ (p.busnum & 0xff) != dev->pdev->bus->number ||
+ p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p.irq = dev->irq;
@@ -118,8 +118,10 @@ static int drm_irq_install(drm_device_t * dev)
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init(&dev->vbl_lock);
+ spin_lock_init(&dev->tasklet_lock);
INIT_LIST_HEAD(&dev->vbl_sigs.head);
+ INIT_LIST_HEAD(&dev->vbl_sigs2.head);
dev->vbl_pending = 0;
}
@@ -174,6 +176,8 @@ int drm_irq_uninstall(drm_device_t * dev)
free_irq(dev->irq, dev);
+ dev->locked_tasklet_func = NULL;
+
return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
@@ -222,12 +226,12 @@ int drm_control(struct inode *inode, struct file *filp,
* Wait for VBLANK.
*
* \param inode device inode.
- * \param filp file pointer.rm.
+ * \param filp file pointer.
* \param cmd command.
* \param data user argument, pointing to a drm_wait_vblank structure.
* \return zero on success or a negative number on failure.
*
- * Verifies the IRQ is installed
+ * Verifies the IRQ is installed.
*
* If a signal is requested checks if this task has already scheduled the same signal
* for the same vblank sequence number - nothing to be done in
@@ -245,19 +249,34 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
drm_wait_vblank_t vblwait;
struct timeval now;
int ret = 0;
- unsigned int flags;
+ unsigned int flags, seq;
- if (!drm_core_check_feature(dev, DRIVER_IRQ_VBL))
+ if ((!dev->irq) || (!dev->irq_enabled))
return -EINVAL;
- if ((!dev->irq) || (!dev->irq_enabled))
+ if (copy_from_user(&vblwait, argp, sizeof(vblwait)))
+ return -EFAULT;
+
+ if (vblwait.request.type &
+ ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+ DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+ vblwait.request.type,
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
return -EINVAL;
+ }
- DRM_COPY_FROM_USER_IOCTL(vblwait, argp, sizeof(vblwait));
+ flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+
+ if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
+ DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+ return -EINVAL;
- switch (vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK) {
+ seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
+ : &dev->vbl_received);
+
+ switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
- vblwait.request.sequence += atomic_read(&dev->vbl_received);
+ vblwait.request.sequence += seq;
vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
case _DRM_VBLANK_ABSOLUTE:
break;
@@ -265,26 +284,30 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
return -EINVAL;
}
- flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
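+	/*
+	 * _DRM_VBLANK_NEXTONMISS: if the requested sequence has already
+	 * passed, wait for the next vblank instead.
+	 */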
+ if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ (seq - vblwait.request.sequence) <= (1<<23)) {
+ vblwait.request.sequence = seq + 1;
+ }
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
+ drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
+ ? &dev->vbl_sigs2 : &dev->vbl_sigs;
drm_vbl_sig_t *vbl_sig;
- vblwait.reply.sequence = atomic_read(&dev->vbl_received);
-
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Check if this task has already scheduled the same signal
* for the same vblank sequence number; nothing to be done in
* that case
*/
- list_for_each_entry(vbl_sig, &dev->vbl_sigs.head, head) {
+ list_for_each_entry(vbl_sig, &vbl_sigs->head, head) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current) {
spin_unlock_irqrestore(&dev->vbl_lock,
irqflags);
+ vblwait.reply.sequence = seq;
goto done;
}
}
@@ -312,11 +335,16 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- list_add_tail((struct list_head *)vbl_sig, &dev->vbl_sigs.head);
+ list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ vblwait.reply.sequence = seq;
} else {
- if (dev->driver->vblank_wait)
+ if (flags & _DRM_VBLANK_SECONDARY) {
+ if (dev->driver->vblank_wait2)
+ ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence);
+ } else if (dev->driver->vblank_wait)
ret =
dev->driver->vblank_wait(dev,
&vblwait.request.sequence);
@@ -327,7 +355,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
}
done:
- DRM_COPY_TO_USER_IOCTL(argp, vblwait, sizeof(vblwait));
+ if (copy_to_user(argp, &vblwait, sizeof(vblwait)))
+ return -EFAULT;
return ret;
}
@@ -343,28 +372,109 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
*/
void drm_vbl_send_signals(drm_device_t * dev)
{
- struct list_head *list, *tmp;
- drm_vbl_sig_t *vbl_sig;
- unsigned int vbl_seq = atomic_read(&dev->vbl_received);
unsigned long flags;
+ int i;
spin_lock_irqsave(&dev->vbl_lock, flags);
- list_for_each_safe(list, tmp, &dev->vbl_sigs.head) {
- vbl_sig = list_entry(list, drm_vbl_sig_t, head);
- if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
- vbl_sig->info.si_code = vbl_seq;
- send_sig_info(vbl_sig->info.si_signo, &vbl_sig->info,
- vbl_sig->task);
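+	/* i == 0 walks the primary CRTC's signal list, i == 1 the secondary's. */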
+ for (i = 0; i < 2; i++) {
+ struct list_head *list, *tmp;
+ drm_vbl_sig_t *vbl_sig;
+ drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+ unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
+ &dev->vbl_received);
+
+ list_for_each_safe(list, tmp, &vbl_sigs->head) {
+ vbl_sig = list_entry(list, drm_vbl_sig_t, head);
+ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+ vbl_sig->info.si_code = vbl_seq;
+ send_sig_info(vbl_sig->info.si_signo,
+ &vbl_sig->info, vbl_sig->task);
- list_del(list);
+ list_del(list);
- drm_free(vbl_sig, sizeof(*vbl_sig), DRM_MEM_DRIVER);
+ drm_free(vbl_sig, sizeof(*vbl_sig),
+ DRM_MEM_DRIVER);
- dev->vbl_pending--;
+ dev->vbl_pending--;
+ }
}
}
spin_unlock_irqrestore(&dev->vbl_lock, flags);
}
EXPORT_SYMBOL(drm_vbl_send_signals);
+
+/**
+ * Tasklet wrapper function.
+ *
+ * \param data DRM device in disguise.
+ *
+ * Attempts to grab the HW lock and calls the driver callback on success. On
+ * failure, leave the lock marked as contended so the callback can be called
+ * from drm_unlock().
+ */
+static void drm_locked_tasklet_func(unsigned long data)
+{
+ drm_device_t *dev = (drm_device_t*)data;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+ if (!dev->locked_tasklet_func ||
+ !drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+ return;
+ }
+
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
+ dev->locked_tasklet_func(dev);
+
+ drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT);
+
+ dev->locked_tasklet_func = NULL;
+
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+}
+
+/**
+ * Schedule a tasklet to call back a driver hook with the HW lock held.
+ *
+ * \param dev DRM device.
+ * \param func Driver callback.
+ *
+ * This is intended for triggering actions that require the HW lock from an
+ * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
+ * completes. Note that the callback may be called from either interrupt or
+ * process context; it must not make any assumptions about this. Also, the HW
+ * lock will be held with either the kernel context or a client context.
+ */
+void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*))
+{
+ unsigned long irqflags;
+ static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
+
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
+ test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
+ return;
+
+ spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+ if (dev->locked_tasklet_func) {
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+ return;
+ }
+
+ dev->locked_tasklet_func = func;
+
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+ drm_tasklet.data = (unsigned long)dev;
+
+ tasklet_hi_schedule(&drm_tasklet);
+}
+EXPORT_SYMBOL(drm_locked_tasklet);
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index a268d8ee..d11c570e 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -35,9 +35,12 @@
#include "drmP.h"
+#if 0
static int drm_lock_transfer(drm_device_t * dev,
__volatile__ unsigned int *lock,
unsigned int context);
+#endif
+
static int drm_notifier(void *priv);
/**
@@ -104,7 +107,7 @@ int drm_lock(struct inode *inode, struct file *filp,
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
- DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+ DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
if (ret) return ret;
sigemptyset(&dev->sigmask);
@@ -152,6 +155,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->head->dev;
drm_lock_t lock;
+ unsigned long irqflags;
if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
return -EFAULT;
@@ -162,6 +166,16 @@ int drm_unlock(struct inode *inode, struct file *filp,
return -EINVAL;
}
+ spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+ if (dev->locked_tasklet_func) {
+ dev->locked_tasklet_func(dev);
+
+ dev->locked_tasklet_func = NULL;
+ }
+
+ spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
/* kernel_context_switch isn't used by any of the x86 drm
@@ -170,12 +184,9 @@ int drm_unlock(struct inode *inode, struct file *filp,
if (dev->driver->kernel_context_switch_unlock)
dev->driver->kernel_context_switch_unlock(dev);
else {
- drm_lock_transfer(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT);
-
if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
- DRM_KERNEL_CONTEXT)) {
- DRM_ERROR("\n");
+ lock.context)) {
+ /* FIXME: Should really bail out here. */
}
}
@@ -201,7 +212,7 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
if (old & _DRM_LOCK_HELD)
new = old | _DRM_LOCK_CONT;
else
- new = context | _DRM_LOCK_HELD;
+ new = context | _DRM_LOCK_HELD | _DRM_LOCK_CONT;
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCKING_CONTEXT(old) == context) {
@@ -213,13 +224,14 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
return 0;
}
}
- if (new == (context | _DRM_LOCK_HELD)) {
+ if (new == (context | _DRM_LOCK_HELD | _DRM_LOCK_CONT)) {
/* Have lock */
return 1;
}
return 0;
}
+#if 0
/**
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
@@ -246,6 +258,7 @@ static int drm_lock_transfer(drm_device_t * dev,
} while (prev != old);
return 1;
}
+#endif
/**
* Free lock.
@@ -263,12 +276,12 @@ int drm_lock_free(drm_device_t * dev,
{
unsigned int old, new, prev;
- dev->lock.filp = NULL;
do {
old = *lock;
- new = 0;
+ new = _DRM_LOCKING_CONTEXT(old);
prev = cmpxchg(lock, old, new);
} while (prev != old);
+
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
DRM_ERROR("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
@@ -308,3 +321,66 @@ static int drm_notifier(void *priv)
} while (prev != old);
return 0;
}
+
+/*
+ * Can be used by drivers to take the hardware lock if necessary.
+ * (Waiting for idle before reclaiming buffers etc.)
+ */
+
+int drm_i_have_hw_lock(struct file *filp)
+{
+ DRM_DEVICE;
+
+ return (priv->lock_count && dev->lock.hw_lock &&
+ _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+ dev->lock.filp == filp);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
+
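+/*
+ * Take the hardware lock for the kernel context on behalf of filp, unless the
+ * file already holds it. Waits up to 3*DRM_HZ; returns 0 on success, -EBUSY on
+ * timeout, -EINTR if the device was unregistered while waiting, and
+ * -ERESTARTSYS if a signal was received.
+ */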
+int drm_kernel_take_hw_lock(struct file *filp)
+{
+ DRM_DEVICE;
+
+ int ret = 0;
+ unsigned long _end = jiffies + 3*DRM_HZ;
+
+ if (!drm_i_have_hw_lock(filp)) {
+
+ DECLARE_WAITQUEUE(entry, current);
+
+ add_wait_queue(&dev->lock.lock_queue, &entry);
+ for (;;) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = -EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ dev->lock.filp = filp;
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+ /* Contention */
+ if (time_after_eq(jiffies,_end)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ schedule_timeout(1);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&dev->lock.lock_queue, &entry);
+ }
+ return ret;
+}
+
+EXPORT_SYMBOL(drm_kernel_take_hw_lock);
+
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 9125cd47..3370c279 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -33,10 +33,78 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/config.h>
#include <linux/highmem.h>
#include "drmP.h"
+static struct {
+ spinlock_t lock;
+ drm_u64_t cur_used;
+ drm_u64_t low_threshold;
+ drm_u64_t high_threshold;
+} drm_memctl = {
+ .lock = SPIN_LOCK_UNLOCKED
+};
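+
+/*
+ * Global accounting of memory used for user-space objects. Sizes are rounded
+ * up by drm_size_align() below (next power of two, minimum 4 bytes, or
+ * page-aligned above PAGE_SIZE). drm_alloc_memctl() fails with -ENOMEM once
+ * cur_used would exceed high_threshold; drm_free_memctl() returns the space.
+ * The thresholds are given in pages to drm_init_memctl().
+ */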
+
+static inline size_t drm_size_align(size_t size) {
+
+ register size_t tmpSize = 4;
+ if (size > PAGE_SIZE)
+ return PAGE_ALIGN(size);
+
+ while(tmpSize < size)
+ tmpSize <<= 1;
+
+ return (size_t) tmpSize;
+}
+
+int drm_alloc_memctl(size_t size)
+{
+ int ret;
+ unsigned long a_size = drm_size_align(size);
+
+ spin_lock(&drm_memctl.lock);
+ ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?
+ -ENOMEM : 0;
+ if (!ret)
+ drm_memctl.cur_used += a_size;
+ spin_unlock(&drm_memctl.lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_alloc_memctl);
+
+void drm_free_memctl(size_t size)
+{
+ unsigned long a_size = drm_size_align(size);
+
+ spin_lock(&drm_memctl.lock);
+ drm_memctl.cur_used -= a_size;
+ spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_free_memctl);
+
+void drm_query_memctl(drm_u64_t *cur_used,
+ drm_u64_t *low_threshold,
+ drm_u64_t *high_threshold)
+{
+ spin_lock(&drm_memctl.lock);
+ *cur_used = drm_memctl.cur_used;
+ *low_threshold = drm_memctl.low_threshold;
+ *high_threshold = drm_memctl.high_threshold;
+ spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_query_memctl);
+
+void drm_init_memctl(size_t p_low_threshold,
+ size_t p_high_threshold)
+{
+ spin_lock(&drm_memctl.lock);
+ drm_memctl.cur_used = 0;
+ drm_memctl.low_threshold = p_low_threshold << PAGE_SHIFT;
+ drm_memctl.high_threshold = p_high_threshold << PAGE_SHIFT;
+ spin_unlock(&drm_memctl.lock);
+}
+
+
#ifndef DEBUG_MEMORY
/** No-op. */
diff --git a/linux-core/drm_memory.h b/linux-core/drm_memory.h
index 4a4fd5c3..4a2c3583 100644
--- a/linux-core/drm_memory.h
+++ b/linux-core/drm_memory.h
@@ -33,7 +33,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/config.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include "drmP.h"
diff --git a/linux-core/drm_memory_debug.c b/linux-core/drm_memory_debug.c
index 2fe7aeaa..aa1b2922 100644
--- a/linux-core/drm_memory_debug.c
+++ b/linux-core/drm_memory_debug.c
@@ -31,7 +31,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/config.h>
#include "drmP.h"
#ifdef DEBUG_MEMORY
diff --git a/linux-core/drm_memory_debug.h b/linux-core/drm_memory_debug.h
index 706b7525..1e0a63b7 100644
--- a/linux-core/drm_memory_debug.h
+++ b/linux-core/drm_memory_debug.h
@@ -31,7 +31,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/config.h>
#include "drmP.h"
typedef struct drm_mem_stats {
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index 617526bd..a5566b2f 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -42,36 +42,137 @@
*/
#include "drmP.h"
+#include <linux/slab.h>
+
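+/*
+ * Helpers for querying and resizing the free space at the tail of the managed
+ * range. Nodes are kept on a list ordered by offset, so the node just before
+ * root_node is the one at the highest address.
+ */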
+unsigned long drm_mm_tail_space(drm_mm_t *mm)
+{
+ struct list_head *tail_node;
+ drm_mm_node_t *entry;
+
+ tail_node = mm->root_node.ml_entry.prev;
+ entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+ if (!entry->free)
+ return 0;
+
+ return entry->size;
+}
+
+int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
+{
+ struct list_head *tail_node;
+ drm_mm_node_t *entry;
+
+ tail_node = mm->root_node.ml_entry.prev;
+ entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+ if (!entry->free)
+ return -ENOMEM;
+
+ if (entry->size <= size)
+ return -ENOMEM;
+
+ entry->size -= size;
+ return 0;
+}
+
+
+static int drm_mm_create_tail_node(drm_mm_t *mm,
+ unsigned long start,
+ unsigned long size)
+{
+ drm_mm_node_t *child;
+
+ child = (drm_mm_node_t *)
+ drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+ GFP_KERNEL);
+ if (!child)
+ return -ENOMEM;
+
+ child->free = 1;
+ child->size = size;
+ child->start = start;
+ child->mm = mm;
+
+ list_add_tail(&child->ml_entry, &mm->root_node.ml_entry);
+ list_add_tail(&child->fl_entry, &mm->root_node.fl_entry);
+
+ return 0;
+}
+
+
+int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
+{
+ struct list_head *tail_node;
+ drm_mm_node_t *entry;
+
+ tail_node = mm->root_node.ml_entry.prev;
+ entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+ if (!entry->free) {
+ return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+ }
+ entry->size += size;
+ return 0;
+}
+
+static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
+ unsigned long size)
+{
+ drm_mm_node_t *child;
+
+ child = (drm_mm_node_t *)
+ drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+ GFP_KERNEL);
+ if (!child)
+ return NULL;
+
+ INIT_LIST_HEAD(&child->fl_entry);
+
+ child->free = 0;
+ child->size = size;
+ child->start = parent->start;
+ child->mm = parent->mm;
+
+ list_add_tail(&child->ml_entry, &parent->ml_entry);
+ INIT_LIST_HEAD(&child->fl_entry);
+
+ parent->size -= size;
+ parent->start += size;
+ return child;
+}
+
+
drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
unsigned long size, unsigned alignment)
{
+ drm_mm_node_t *align_splitoff = NULL;
drm_mm_node_t *child;
+ unsigned tmp = 0;
if (alignment)
- size += alignment - 1;
-
+ tmp = size % alignment;
+
+ if (tmp) {
+ align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+ if (!align_splitoff)
+ return NULL;
+ }
+
if (parent->size == size) {
list_del_init(&parent->fl_entry);
parent->free = 0;
return parent;
} else {
- child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
- if (!child)
+ child = drm_mm_split_at_start(parent, size);
+ if (!child) {
+ if (align_splitoff)
+ drm_mm_put_block(align_splitoff);
return NULL;
-
- INIT_LIST_HEAD(&child->ml_entry);
- INIT_LIST_HEAD(&child->fl_entry);
-
- child->free = 0;
- child->size = size;
- child->start = parent->start;
-
- list_add_tail(&child->ml_entry, &parent->ml_entry);
- parent->size -= size;
- parent->start += size;
+ }
}
+ if (align_splitoff)
+ drm_mm_put_block(align_splitoff);
+
return child;
}
@@ -80,9 +181,10 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
* Otherwise add to the free stack.
*/
-void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
+void drm_mm_put_block(drm_mm_node_t * cur)
{
+ drm_mm_t *mm = cur->mm;
drm_mm_node_t *list_root = &mm->root_node;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &list_root->ml_entry;
@@ -105,8 +207,9 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
- drm_free(next_node, sizeof(*next_node),
- DRM_MEM_MM);
+ drm_ctl_cache_free(drm_cache.mm,
+ sizeof(*next_node),
+ next_node);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@@ -119,7 +222,7 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
list_add(&cur->fl_entry, &list_root->fl_entry);
} else {
list_del(&cur->ml_entry);
- drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+ drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);
}
}
@@ -132,16 +235,23 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
drm_mm_node_t *entry;
drm_mm_node_t *best;
unsigned long best_size;
+ unsigned wasted;
best = NULL;
best_size = ~0UL;
- if (alignment)
- size += alignment - 1;
-
list_for_each(list, free_stack) {
entry = list_entry(list, drm_mm_node_t, fl_entry);
- if (entry->size >= size) {
+ wasted = 0;
+
+ if (alignment) {
+ register unsigned tmp = size % alignment;
+ if (tmp)
+ wasted += alignment - tmp;
+ }
+
+
+ if (entry->size >= size + wasted) {
if (!best_match)
return entry;
if (size < best_size) {
@@ -154,27 +264,19 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
return best;
}
-int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
+int drm_mm_clean(drm_mm_t * mm)
{
- drm_mm_node_t *child;
+ struct list_head *head = &mm->root_node.ml_entry;
+
+ return (head->next->next == head);
+}
+int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
+{
INIT_LIST_HEAD(&mm->root_node.ml_entry);
INIT_LIST_HEAD(&mm->root_node.fl_entry);
- child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
- if (!child)
- return -ENOMEM;
- INIT_LIST_HEAD(&child->ml_entry);
- INIT_LIST_HEAD(&child->fl_entry);
-
- child->start = start;
- child->size = size;
- child->free = 1;
-
- list_add(&child->fl_entry, &mm->root_node.fl_entry);
- list_add(&child->ml_entry, &mm->root_node.ml_entry);
-
- return 0;
+ return drm_mm_create_tail_node(mm, start, size);
}
EXPORT_SYMBOL(drm_mm_init);
@@ -194,8 +296,7 @@ void drm_mm_takedown(drm_mm_t * mm)
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
-
- drm_free(entry, sizeof(*entry), DRM_MEM_MM);
+ drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);
}
EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
new file mode 100644
index 00000000..0157329c
--- /dev/null
+++ b/linux-core/drm_object.c
@@ -0,0 +1,287 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+
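+/*
+ * Register a user-visible object: initialize its reference count to one,
+ * record the owning file, insert it into the device-wide object hash under a
+ * newly generated 32-bit handle, and add it to the owner's user_objects list.
+ */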
+int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
+ int shareable)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ atomic_set(&item->refcount, 1);
+ item->shareable = shareable;
+ item->owner = priv;
+
+ ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
+ (unsigned long)item, 32, 0, 0);
+ if (ret)
+ return ret;
+
+ list_add_tail(&item->list, &priv->user_objects);
+ return 0;
+}
+
+drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_hash_item_t *hash;
+ int ret;
+ drm_user_object_t *item;
+
+ ret = drm_ht_find_item(&dev->object_hash, key, &hash);
+ if (ret) {
+ return NULL;
+ }
+ item = drm_hash_entry(hash, drm_user_object_t, hash);
+
+ if (priv != item->owner) {
+ drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE];
+ ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
+ if (ret) {
+ DRM_ERROR("Object not registered for usage\n");
+ return NULL;
+ }
+ }
+ return item;
+}
+
+static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
+{
+ drm_device_t *dev = priv->head->dev;
+ int ret;
+
+ if (atomic_dec_and_test(&item->refcount)) {
+ ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
+ BUG_ON(ret);
+ list_del_init(&item->list);
+ item->remove(priv, item);
+ }
+}
+
+int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
+{
+ if (item->owner != priv) {
+ DRM_ERROR("Cannot destroy object not owned by you.\n");
+ return -EINVAL;
+ }
+ item->owner = 0;
+ item->shareable = 0;
+ list_del_init(&item->list);
+ drm_deref_user_object(priv, item);
+ return 0;
+}
+
+static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro,
+ drm_ref_t action)
+{
+ int ret = 0;
+
+ switch (action) {
+ case _DRM_REF_USE:
+ atomic_inc(&ro->refcount);
+ break;
+ default:
+ if (!ro->ref_struct_locked) {
+ break;
+ } else {
+ ro->ref_struct_locked(priv, ro, action);
+ }
+ }
+ return ret;
+}
+
+int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
+ drm_ref_t ref_action)
+{
+ int ret = 0;
+ drm_ref_object_t *item;
+ drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];
+
+ if (!referenced_object->shareable && priv != referenced_object->owner) {
+ DRM_ERROR("Not allowed to reference this object\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * If this is not a usage reference, check that usage has been registered
+ * first. Otherwise strange things may happen on destruction.
+ */
+
+ if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) {
+ item =
+ drm_lookup_ref_object(priv, referenced_object,
+ _DRM_REF_USE);
+ if (!item) {
+ DRM_ERROR
+ ("Object not registered for usage by this client\n");
+ return -EINVAL;
+ }
+ }
+
+ if (NULL !=
+ (item =
+ drm_lookup_ref_object(priv, referenced_object, ref_action))) {
+ atomic_inc(&item->refcount);
+ return drm_object_ref_action(priv, referenced_object,
+ ref_action);
+ }
+
+ item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
+ if (item == NULL) {
+ DRM_ERROR("Could not allocate reference object\n");
+ return -ENOMEM;
+ }
+
+ atomic_set(&item->refcount, 1);
+ item->hash.key = (unsigned long)referenced_object;
+ ret = drm_ht_insert_item(ht, &item->hash);
+ item->unref_action = ref_action;
+
+ if (ret)
+ goto out;
+
+ list_add(&item->list, &priv->refd_objects);
+ ret = drm_object_ref_action(priv, referenced_object, ref_action);
+ out:
+ return ret;
+}
+
+drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
+ drm_user_object_t * referenced_object,
+ drm_ref_t ref_action)
+{
+ drm_hash_item_t *hash;
+ int ret;
+
+ ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
+ (unsigned long)referenced_object, &hash);
+ if (ret)
+ return NULL;
+
+ return drm_hash_entry(hash, drm_ref_object_t, hash);
+}
+
+static void drm_remove_other_references(drm_file_t * priv,
+ drm_user_object_t * ro)
+{
+ int i;
+ drm_open_hash_t *ht;
+ drm_hash_item_t *hash;
+ drm_ref_object_t *item;
+
+ for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
+ ht = &priv->refd_object_hash[i];
+ while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
+ item = drm_hash_entry(hash, drm_ref_object_t, hash);
+ drm_remove_ref_object(priv, item);
+ }
+ }
+}
+
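+/*
+ * Drop one reference of the given type. The matching unref action is always
+ * applied to the user object (a plain dereference for _DRM_REF_USE, otherwise
+ * the object's unref callback). When the ref object's count reaches zero it is
+ * unhashed, unlisted and freed; dropping the last _DRM_REF_USE reference also
+ * removes any remaining references of other types held by this file.
+ */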
+void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
+{
+ int ret;
+ drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key;
+ drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
+ drm_ref_t unref_action;
+
+ unref_action = item->unref_action;
+ if (atomic_dec_and_test(&item->refcount)) {
+ ret = drm_ht_remove_item(ht, &item->hash);
+ BUG_ON(ret);
+ list_del_init(&item->list);
+ if (unref_action == _DRM_REF_USE)
+ drm_remove_other_references(priv, user_object);
+ drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
+ }
+
+ switch (unref_action) {
+ case _DRM_REF_USE:
+ drm_deref_user_object(priv, user_object);
+ break;
+ default:
+ BUG_ON(!user_object->unref);
+ user_object->unref(priv, user_object, unref_action);
+ break;
+ }
+
+}
+
+int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type, drm_user_object_t ** object)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_user_object_t *uo;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, user_token);
+ if (!uo || (uo->type != type)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
+ if (ret)
+ goto out_err;
+ mutex_unlock(&dev->struct_mutex);
+ *object = uo;
+ DRM_ERROR("Referenced an object\n");
+ return 0;
+ out_err:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
+ drm_object_type_t type)
+{
+ drm_device_t *dev = priv->head->dev;
+ drm_user_object_t *uo;
+ drm_ref_object_t *ro;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(priv, user_token);
+ if (!uo || (uo->type != type)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE);
+ if (!ro) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ drm_remove_ref_object(priv, ro);
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("Unreferenced an object\n");
+ return 0;
+ out_err:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/linux-core/drm_pci.c b/linux-core/drm_pci.c
index b69dda22..40a65f3e 100644
--- a/linux-core/drm_pci.c
+++ b/linux-core/drm_pci.c
@@ -37,6 +37,7 @@
*/
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include "drmP.h"
/**********************************************************************/
@@ -83,11 +84,7 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
return NULL;
dmah->size = size;
-#if 0
- dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
-#else
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
-#endif
#ifdef DRM_DEBUG_MEMORY
if (dmah->vaddr == NULL) {
@@ -112,14 +109,12 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
memset(dmah->vaddr, 0, size);
-#if 1
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Reserve */
for (addr = (unsigned long)dmah->vaddr, sz = size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page(addr));
}
-#endif
return dmah;
}
@@ -132,10 +127,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
{
-#if 1
unsigned long addr;
size_t sz;
-#endif
#ifdef DRM_DEBUG_MEMORY
int area = DRM_MEM_DMA;
int alloc_count;
@@ -147,21 +140,14 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
DRM_MEM_ERROR(area, "Attempt to free address 0\n");
#endif
} else {
-#if 1
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
-#endif
-#if 0
- pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr,
- dmah->busaddr);
-#else
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
-#endif
}
#ifdef DRM_DEBUG_MEMORY
@@ -181,7 +167,7 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
}
/**
- * \brief Free a PCI consistent memory block.
+ * \brief Free a PCI consistent memory block
*/
void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
{
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 014486c1..863cacfc 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
+static int drm_objects_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
@@ -67,6 +69,7 @@ static struct drm_proc_list {
{"clients", drm_clients_info},
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
+ {"objects", drm_objects_info},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info},
#endif
@@ -238,10 +241,11 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
type = "??";
else
type = types[map->type];
- DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
+ DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
- map->size, type, map->flags, r_list->user_token);
+ map->size, type, map->flags,
+ (unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
@@ -258,7 +262,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
}
/**
- * Simply calls _vm_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
@@ -331,7 +335,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
}
/**
- * Simply calls _queues_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_queues_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
@@ -403,7 +407,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
}
/**
- * Simply calls _bufs_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
@@ -418,6 +422,89 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
}
/**
+ * Called when "/proc/dri/.../objects" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__objects_info(char *buf, char **start, off_t offset, int request,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *) data;
+ int len = 0;
+ drm_buffer_manager_t *bm = &dev->bm;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_u64_t used_mem;
+ drm_u64_t low_mem;
+ drm_u64_t high_mem;
+
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+
+ if (fm->initialized) {
+ DRM_PROC_PRINT("Number of active fence objects: %d.\n\n",
+ atomic_read(&fm->count));
+ } else {
+ DRM_PROC_PRINT("Fence objects are not supported by this driver\n\n");
+ }
+
+ if (bm->initialized) {
+ DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
+ atomic_read(&bm->count));
+ DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
+ } else {
+ DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n");
+ }
+
+ drm_query_memctl(&used_mem, &low_mem, &high_mem);
+
+ if (used_mem > 16*PAGE_SIZE) {
+ DRM_PROC_PRINT("Used object memory is %lu pages.\n",
+ (unsigned long) (used_mem >> PAGE_SHIFT));
+ } else {
+ DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
+ (unsigned long) used_mem);
+ }
+ DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
+ (unsigned long) (low_mem >> PAGE_SHIFT));
+ DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
+ (unsigned long) (high_mem >> PAGE_SHIFT));
+
+ DRM_PROC_PRINT("\n");
+
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+/**
+ * Simply calls _objects_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_objects_info(char *buf, char **start, off_t offset, int request,
+ int *eof, void *data)
+{
+ drm_device_t *dev = (drm_device_t *) data;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = drm__objects_info(buf, start, offset, request, eof, data);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
* Called when "/proc/dri/.../clients" is read.
*
* \param buf output buffer.
@@ -459,7 +546,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
}
/**
- * Simply calls _clients_info() while holding the drm_device::struct_sem lock.
+ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
@@ -500,7 +587,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
for (pt = dev->vmalist; pt; pt = pt->next) {
if (!(vma = pt->vma))
continue;
- DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx",
+ DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
pt->pid,
vma->vm_start,
vma->vm_end,
@@ -510,7 +597,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
- VM_OFFSET(vma));
+ vma->vm_pgoff);
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
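The handler above only formats counters the fence and buffer managers already keep (object counts, locked GATT pages, memctl thresholds), so the new file is read like any other procfs entry. A minimal user-space sketch, not part of the patch, assuming DRM minor 0 so the path is /proc/dri/0/objects:

#include <stdio.h>

int main(void)
{
	char line[256];
	/* The minor number is an assumption; substitute the card actually in use. */
	FILE *f = fopen("/proc/dri/0/objects", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}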
diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c
index a7144f1a..e5c9f877 100644
--- a/linux-core/drm_scatter.c
+++ b/linux-core/drm_scatter.c
@@ -31,7 +31,6 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/config.h>
#include <linux/vmalloc.h>
#include "drmP.h"
diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c
index b92f0ee7..19a13f3a 100644
--- a/linux-core/drm_sman.c
+++ b/linux-core/drm_sman.c
@@ -101,10 +101,9 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size,
static void drm_sman_mm_free(void *private, void *ref)
{
- drm_mm_t *mm = (drm_mm_t *) private;
drm_mm_node_t *node = (drm_mm_node_t *) ref;
- drm_mm_put_block(mm, node);
+ drm_mm_put_block(node);
}
static void drm_sman_mm_destroy(void *private)
@@ -114,7 +113,7 @@ static void drm_sman_mm_destroy(void *private)
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
}
-unsigned long drm_sman_mm_offset(void *private, void *ref)
+static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
drm_mm_node_t *node = (drm_mm_node_t *) ref;
return node->start;
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 25bb5f33..c03a56a1 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -54,6 +54,11 @@ drm_head_t **drm_heads;
struct drm_sysfs_class *drm_class;
struct proc_dir_entry *drm_proc_root;
+drm_cache_t drm_cache =
+{ .mm = NULL,
+ .fence_object = NULL
+};
+
static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
@@ -61,31 +66,44 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
int retcode;
spin_lock_init(&dev->count_lock);
+ spin_lock_init(&dev->drw_lock);
+ spin_lock_init(&dev->tasklet_lock);
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
+ mutex_init(&dev->bm.init_mutex);
dev->pdev = pdev;
+ dev->pci_device = pdev->device;
+ dev->pci_vendor = pdev->vendor;
#ifdef __alpha__
dev->hose = pdev->sysdata;
- dev->pci_domain = dev->hose->bus->number;
-#else
- dev->pci_domain = 0;
#endif
- dev->pci_bus = pdev->bus->number;
- dev->pci_slot = PCI_SLOT(pdev->devfn);
- dev->pci_func = PCI_FUNC(pdev->devfn);
dev->irq = pdev->irq;
+ if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
+ drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+ return -ENOMEM;
+ }
+ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE)) {
+ drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+ drm_ht_remove(&dev->map_hash);
+ return -ENOMEM;
+ }
+
+ if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
+ drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+ drm_ht_remove(&dev->map_hash);
+ drm_mm_takedown(&dev->offset_manager);
+ return -ENOMEM;
+ }
+
dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
if (dev->maplist == NULL)
return -ENOMEM;
INIT_LIST_HEAD(&dev->maplist->head);
- if (drm_ht_create(&dev->map_hash, 12)) {
- drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
- return -ENOMEM;
- }
/* the DRM has 6 counters */
dev->counters = 6;
@@ -127,6 +145,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
+ drm_fence_manager_init(dev);
return 0;
error_out_unreg:
diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c
index df75d7b0..e5dd0532 100644
--- a/linux-core/drm_sysfs.c
+++ b/linux-core/drm_sysfs.c
@@ -11,7 +11,6 @@
*
*/
-#include <linux/config.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/err.h>
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
new file mode 100644
index 00000000..931972af
--- /dev/null
+++ b/linux-core/drm_ttm.c
@@ -0,0 +1,519 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+
+static void drm_ttm_ipi_handler(void *null)
+{
+ flush_agp_cache();
+}
+
+static void drm_ttm_cache_flush(void)
+{
+ if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
+ DRM_ERROR("Timed out waiting for drm cache flush.\n");
+}
+
+
+/*
+ * Use kmalloc if possible. Otherwise fall back to vmalloc.
+ */
+
+static void *ttm_alloc(unsigned long size, int type)
+{
+ void *ret = NULL;
+
+ if (drm_alloc_memctl(size))
+ return NULL;
+ if (size <= PAGE_SIZE) {
+ ret = drm_alloc(size, type);
+ }
+ if (!ret) {
+ ret = vmalloc(size);
+ }
+ if (!ret) {
+ drm_free_memctl(size);
+ }
+ return ret;
+}
+
+static void ttm_free(void *pointer, unsigned long size, int type)
+{
+
+ if ((unsigned long)pointer >= VMALLOC_START &&
+ (unsigned long)pointer <= VMALLOC_END) {
+ vfree(pointer);
+ } else {
+ drm_free(pointer, size, type);
+ }
+ drm_free_memctl(size);
+}
+
+/*
+ * Unmap all vma pages from vmas mapping this ttm.
+ */
+
+static int unmap_vma_pages(drm_ttm_t * ttm)
+{
+ drm_device_t *dev = ttm->dev;
+ loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
+ loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
+
+#ifdef DRM_ODD_MM_COMPAT
+ int ret;
+ ret = drm_ttm_lock_mm(ttm);
+ if (ret)
+ return ret;
+#endif
+ unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
+#ifdef DRM_ODD_MM_COMPAT
+ drm_ttm_finish_unmap(ttm);
+#endif
+ return 0;
+}
+
+/*
+ * Change caching policy for the linear kernel map
+ * for range of pages in a ttm.
+ */
+
+static int drm_set_caching(drm_ttm_t * ttm, int noncached)
+{
+ int i;
+ struct page **cur_page;
+ int do_tlbflush = 0;
+
+ if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
+ return 0;
+
+ if (noncached)
+ drm_ttm_cache_flush();
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ cur_page = ttm->pages + i;
+ if (*cur_page) {
+ if (!PageHighMem(*cur_page)) {
+ if (noncached) {
+ map_page_into_agp(*cur_page);
+ } else {
+ unmap_page_from_agp(*cur_page);
+ }
+ do_tlbflush = 1;
+ }
+ }
+ }
+ if (do_tlbflush)
+ flush_agp_mappings();
+
+ DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
+
+ return 0;
+}
+
+/*
+ * Free all resources associated with a ttm.
+ */
+
+int drm_destroy_ttm(drm_ttm_t * ttm)
+{
+
+ int i;
+ struct page **cur_page;
+ drm_ttm_backend_t *be;
+
+ if (!ttm)
+ return 0;
+
+ if (atomic_read(&ttm->vma_count) > 0) {
+ ttm->destroy = 1;
+ DRM_ERROR("VMAs are still alive. Skipping destruction.\n");
+ return -EBUSY;
+ }
+
+ DRM_DEBUG("Destroying a ttm\n");
+
+#ifdef DRM_TTM_ODD_COMPAT
+ BUG_ON(!list_empty(&ttm->vma_list));
+ BUG_ON(!list_empty(&ttm->p_mm_list));
+#endif
+ be = ttm->be;
+ if (be) {
+ be->destroy(be);
+ ttm->be = NULL;
+ }
+
+ if (ttm->pages) {
+ drm_buffer_manager_t *bm = &ttm->dev->bm;
+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
+ drm_set_caching(ttm, 0);
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ cur_page = ttm->pages + i;
+ if (*cur_page) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+ unlock_page(*cur_page);
+#else
+ ClearPageReserved(*cur_page);
+#endif
+ if (page_count(*cur_page) != 1) {
+ DRM_ERROR("Erroneous page count. "
+ "Leaking pages.\n");
+ }
+ if (page_mapped(*cur_page)) {
+ DRM_ERROR("Erroneous map count. "
+ "Leaking page mappings.\n");
+ }
+
+ /*
+ * End debugging.
+ */
+
+ drm_free_gatt_pages(*cur_page, 0);
+ drm_free_memctl(PAGE_SIZE);
+ --bm->cur_pages;
+ }
+ }
+ ttm_free(ttm->pages, ttm->num_pages * sizeof(*ttm->pages),
+ DRM_MEM_TTM);
+ ttm->pages = NULL;
+ }
+
+ drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
+ return 0;
+}
+
+static int drm_ttm_populate(drm_ttm_t * ttm)
+{
+ struct page *page;
+ unsigned long i;
+ drm_buffer_manager_t *bm;
+ drm_ttm_backend_t *be;
+
+ if (ttm->state != ttm_unpopulated)
+ return 0;
+
+ bm = &ttm->dev->bm;
+ be = ttm->be;
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = ttm->pages[i];
+ if (!page) {
+ if (drm_alloc_memctl(PAGE_SIZE)) {
+ return -ENOMEM;
+ }
+ page = drm_alloc_gatt_pages(0);
+ if (!page) {
+ drm_free_memctl(PAGE_SIZE);
+ return -ENOMEM;
+ }
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+ SetPageLocked(page);
+#else
+ SetPageReserved(page);
+#endif
+ ttm->pages[i] = page;
+ ++bm->cur_pages;
+ }
+ }
+ be->populate(be, ttm->num_pages, ttm->pages);
+ ttm->state = ttm_unbound;
+ return 0;
+}
+
+/*
+ * Initialize a ttm.
+ */
+
+static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
+{
+ drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
+ drm_ttm_t *ttm;
+
+ if (!bo_driver)
+ return NULL;
+
+ ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
+ if (!ttm)
+ return NULL;
+
+#ifdef DRM_ODD_MM_COMPAT
+ INIT_LIST_HEAD(&ttm->p_mm_list);
+ INIT_LIST_HEAD(&ttm->vma_list);
+#endif
+
+ ttm->dev = dev;
+ atomic_set(&ttm->vma_count, 0);
+
+ ttm->destroy = 0;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ ttm->page_flags = 0;
+
+ /*
+ * Account also for AGP module memory usage.
+ */
+
+ ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
+ DRM_MEM_TTM);
+ if (!ttm->pages) {
+ drm_destroy_ttm(ttm);
+ DRM_ERROR("Failed allocating page table\n");
+ return NULL;
+ }
+ memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
+ ttm->be = bo_driver->create_ttm_backend_entry(dev);
+ if (!ttm->be) {
+ drm_destroy_ttm(ttm);
+ DRM_ERROR("Failed creating ttm backend entry\n");
+ return NULL;
+ }
+ ttm->state = ttm_unpopulated;
+ return ttm;
+}
+
+/*
+ * Unbind a ttm region from the aperture.
+ */
+
+int drm_evict_ttm(drm_ttm_t * ttm)
+{
+ drm_ttm_backend_t *be = ttm->be;
+ int ret;
+
+ switch (ttm->state) {
+ case ttm_bound:
+ if (be->needs_ub_cache_adjust(be)) {
+ ret = unmap_vma_pages(ttm);
+ if (ret) {
+ return ret;
+ }
+ }
+ be->unbind(be);
+ break;
+ default:
+ break;
+ }
+ ttm->state = ttm_evicted;
+ return 0;
+}
+
+void drm_fixup_ttm_caching(drm_ttm_t * ttm)
+{
+
+ if (ttm->state == ttm_evicted) {
+ drm_ttm_backend_t *be = ttm->be;
+ if (be->needs_ub_cache_adjust(be)) {
+ drm_set_caching(ttm, 0);
+ }
+ ttm->state = ttm_unbound;
+ }
+}
+
+int drm_unbind_ttm(drm_ttm_t * ttm)
+{
+ int ret = 0;
+
+ if (ttm->state == ttm_bound)
+ ret = drm_evict_ttm(ttm);
+
+ if (ret)
+ return ret;
+
+ drm_fixup_ttm_caching(ttm);
+ return 0;
+}
+
+int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
+{
+
+ int ret = 0;
+ drm_ttm_backend_t *be;
+
+ if (!ttm)
+ return -EINVAL;
+ if (ttm->state == ttm_bound)
+ return 0;
+
+ be = ttm->be;
+
+ ret = drm_ttm_populate(ttm);
+ if (ret)
+ return ret;
+ if (ttm->state == ttm_unbound && !cached) {
+ ret = unmap_vma_pages(ttm);
+ if (ret)
+ return ret;
+
+ drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
+ }
+#ifdef DRM_ODD_MM_COMPAT
+ else if (ttm->state == ttm_evicted && !cached) {
+ ret = drm_ttm_lock_mm(ttm);
+ if (ret)
+ return ret;
+ }
+#endif
+ if ((ret = be->bind(be, aper_offset, cached))) {
+ ttm->state = ttm_evicted;
+#ifdef DRM_ODD_MM_COMPAT
+ if (be->needs_ub_cache_adjust(be))
+ drm_ttm_unlock_mm(ttm);
+#endif
+ DRM_ERROR("Couldn't bind backend.\n");
+ return ret;
+ }
+
+ ttm->aper_offset = aper_offset;
+ ttm->state = ttm_bound;
+
+#ifdef DRM_ODD_MM_COMPAT
+ if (be->needs_ub_cache_adjust(be)) {
+ ret = drm_ttm_remap_bound(ttm);
+ if (ret)
+ return ret;
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * dev->struct_mutex locked.
+ */
+static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object)
+{
+ drm_map_list_t *list = &object->map_list;
+ drm_local_map_t *map;
+
+ if (list->user_token)
+ drm_ht_remove_item(&dev->map_hash, &list->hash);
+
+ if (list->file_offset_node) {
+ drm_mm_put_block(list->file_offset_node);
+ list->file_offset_node = NULL;
+ }
+
+ map = list->map;
+
+ if (map) {
+ drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+ if (ttm) {
+ if (drm_destroy_ttm(ttm) != -EBUSY) {
+ drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
+ }
+ } else {
+ drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
+ }
+ }
+
+ drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
+}
+
+void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
+{
+ if (atomic_dec_and_test(&to->usage)) {
+ drm_ttm_object_remove(dev, to);
+ }
+}
+
+void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to)
+{
+ if (atomic_dec_and_test(&to->usage)) {
+ mutex_lock(&dev->struct_mutex);
+ if (atomic_read(&to->usage) == 0)
+ drm_ttm_object_remove(dev, to);
+ mutex_unlock(&dev->struct_mutex);
+ }
+}
+
+/*
+ * Create a ttm and add it to the drm book-keeping.
+ * dev->struct_mutex locked.
+ */
+
+int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
+ uint32_t flags, drm_ttm_object_t ** ttm_object)
+{
+ drm_ttm_object_t *object;
+ drm_map_list_t *list;
+ drm_local_map_t *map;
+ drm_ttm_t *ttm;
+
+ object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
+ if (!object)
+ return -ENOMEM;
+ object->flags = flags;
+ list = &object->map_list;
+
+ list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
+ if (!list->map) {
+ drm_ttm_object_remove(dev, object);
+ return -ENOMEM;
+ }
+ map = list->map;
+
+ ttm = drm_init_ttm(dev, size);
+ if (!ttm) {
+ DRM_ERROR("Could not create ttm\n");
+ drm_ttm_object_remove(dev, object);
+ return -ENOMEM;
+ }
+
+ map->offset = (unsigned long)ttm;
+ map->type = _DRM_TTM;
+ map->flags = _DRM_REMOVABLE;
+ map->size = ttm->num_pages * PAGE_SIZE;
+ map->handle = (void *)object;
+
+ /*
+ * Add a one-page "hole" to the block size to avoid the mm subsystem
+ * merging vmas.
+ * FIXME: Is this really needed?
+ */
+
+ list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
+ ttm->num_pages + 1, 0, 0);
+ if (!list->file_offset_node) {
+ drm_ttm_object_remove(dev, object);
+ return -ENOMEM;
+ }
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ ttm->num_pages + 1, 0);
+
+ list->hash.key = list->file_offset_node->start;
+
+ if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
+ drm_ttm_object_remove(dev, object);
+ return -ENOMEM;
+ }
+
+ list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
+ ttm->mapping_offset = list->hash.key;
+ atomic_set(&object->usage, 1);
+ *ttm_object = object;
+ return 0;
+}
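Taken together, the functions in this new file form a small lifecycle: create the object (which allocates the page array and backend), bind it into the aperture, unbind or evict it, and drop the reference. A driver-internal sketch of that flow, with locking simplified and the aperture page offset chosen purely for illustration:

/*
 * Sketch only: round-trip through the ttm object API introduced above.
 * Error handling is abbreviated and the bind offset is a made-up example.
 */
static int example_ttm_roundtrip(drm_device_t *dev)
{
	drm_ttm_object_t *to;
	drm_ttm_t *ttm;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_ttm_object_create(dev, 16 * PAGE_SIZE, 0, &to);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	ttm = drm_ttm_from_object(to);

	/* Populate the page array and bind it cached at the given aperture page. */
	ret = drm_bind_ttm(ttm, 1, 0x1000);
	if (!ret)
		ret = drm_unbind_ttm(ttm);

	/* Dropping the last reference removes the map and destroys the ttm. */
	drm_ttm_object_deref_unlocked(dev, to);
	return ret;
}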
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
new file mode 100644
index 00000000..11a13754
--- /dev/null
+++ b/linux-core/drm_ttm.h
@@ -0,0 +1,145 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_TTM_H
+#define _DRM_TTM_H
+#define DRM_HAS_TTM
+
+/*
+ * The backend GART interface (in our case AGP). Any similar type of device
+ * (PCIe?) needs only to implement these functions to be usable with the "TTM"
+ * interface. The AGP backend implementation lives in drm_agpsupport.c and
+ * basically maps these calls to the available functions in agpgart. Each drm
+ * device driver gets an additional function pointer that creates these types,
+ * so that the device can choose the correct aperture (multiple AGP apertures,
+ * etc.). Most device drivers will let this point to the standard AGP
+ * implementation.
+ */
+
+#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
+#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
+#define DRM_BE_FLAG_CBA 0x00000004
+
+typedef struct drm_ttm_backend {
+ unsigned long aperture_base;
+ void *private;
+ uint32_t flags;
+ uint32_t drm_map_type;
+ int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
+ int (*populate) (struct drm_ttm_backend * backend,
+ unsigned long num_pages, struct page ** pages);
+ void (*clear) (struct drm_ttm_backend * backend);
+ int (*bind) (struct drm_ttm_backend * backend,
+ unsigned long offset, int cached);
+ int (*unbind) (struct drm_ttm_backend * backend);
+ void (*destroy) (struct drm_ttm_backend * backend);
+} drm_ttm_backend_t;
+
+typedef struct drm_ttm {
+ struct page **pages;
+ uint32_t page_flags;
+ unsigned long num_pages;
+ unsigned long aper_offset;
+ atomic_t vma_count;
+ struct drm_device *dev;
+ int destroy;
+ uint32_t mapping_offset;
+ drm_ttm_backend_t *be;
+ enum {
+ ttm_bound,
+ ttm_evicted,
+ ttm_unbound,
+ ttm_unpopulated,
+ } state;
+#ifdef DRM_ODD_MM_COMPAT
+ struct list_head vma_list;
+ struct list_head p_mm_list;
+#endif
+
+} drm_ttm_t;
+
+typedef struct drm_ttm_object {
+ atomic_t usage;
+ uint32_t flags;
+ drm_map_list_t map_list;
+} drm_ttm_object_t;
+
+extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
+ uint32_t flags,
+ drm_ttm_object_t ** ttm_object);
+extern void drm_ttm_object_deref_locked(struct drm_device *dev,
+ drm_ttm_object_t * to);
+extern void drm_ttm_object_deref_unlocked(struct drm_device *dev,
+ drm_ttm_object_t * to);
+extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
+ uint32_t handle,
+ int check_owner);
+extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
+
+extern int drm_unbind_ttm(drm_ttm_t * ttm);
+
+/*
+ * Evict a ttm region. Keeps the aperture caching policy.
+ */
+
+extern int drm_evict_ttm(drm_ttm_t * ttm);
+extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
+
+/*
+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
+ * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
+ * when the last vma exits.
+ */
+
+extern int drm_destroy_ttm(drm_ttm_t * ttm);
+extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
+
+static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
+{
+ return (drm_ttm_t *) to->map_list.map->offset;
+}
+
+#define DRM_MASK_VAL(dest, mask, val) \
+ (dest) = ((dest) & ~(mask)) | ((val) & (mask));
+
+#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
+#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
+
+/*
+ * Page flags.
+ */
+
+#define DRM_TTM_PAGE_UNCACHED 0x01
+#define DRM_TTM_PAGE_USED 0x02
+#define DRM_TTM_PAGE_BOUND 0x04
+#define DRM_TTM_PAGE_PRESENT 0x08
+
+#endif
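The drm_ttm_backend_t interface above is all a GART-like device has to supply; the stock AGP implementation is created through drm_agp_init_ttm() in drm_agpsupport.c. For a driver that wanted its own aperture handling, a skeleton might look roughly as follows; every my_gart_* helper is hypothetical and stands in for whatever the hardware actually needs:

/*
 * Sketch of a driver-private backend. Only bind/unbind/destroy are shown;
 * populate, clear and the cache-adjust hook follow the same pattern.
 */
static int my_backend_bind(drm_ttm_backend_t *backend,
			   unsigned long offset, int cached)
{
	/* Hypothetical helper: write the remembered pages into the GART at 'offset'. */
	return my_gart_insert_pages(backend->private, offset, cached);
}

static int my_backend_unbind(drm_ttm_backend_t *backend)
{
	return my_gart_remove_pages(backend->private);	/* hypothetical */
}

static void my_backend_destroy(drm_ttm_backend_t *backend)
{
	drm_ctl_free(backend, sizeof(*backend), DRM_MEM_TTM);
}

static drm_ttm_backend_t *my_create_ttm_backend(drm_device_t *dev)
{
	drm_ttm_backend_t *be = drm_ctl_calloc(1, sizeof(*be), DRM_MEM_TTM);

	if (!be)
		return NULL;
	be->aperture_base = my_gart_aperture_base(dev);	/* hypothetical */
	be->private = dev->dev_private;
	be->flags = DRM_BE_FLAG_NEEDS_FREE;
	be->drm_map_type = _DRM_AGP;
	be->bind = my_backend_bind;
	be->unbind = my_backend_unbind;
	be->destroy = my_backend_destroy;
	/* needs_ub_cache_adjust, populate and clear omitted for brevity. */
	return be;
}

The driver then points its bo_driver->create_ttm_backend_entry hook at such a function, as i915_create_ttm_backend_entry does further down with the AGP default.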
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index cf3bc3cf..6eb996ad 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -34,12 +34,42 @@
*/
#include "drmP.h"
+
#if defined(__ia64__)
#include <linux/efi.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
+static void drm_vm_ttm_close(struct vm_area_struct *vma);
+static int drm_vm_ttm_open(struct vm_area_struct *vma);
+static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma);
+
+
+pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+ pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+ pgprot_val(tmp) |= _PAGE_PCD;
+ pgprot_val(tmp) &= ~_PAGE_PWT;
+ }
+#elif defined(__powerpc__)
+ pgprot_val(tmp) |= _PAGE_NO_CACHE;
+ if (map_type == _DRM_REGISTERS)
+ pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+ if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+ vma->vm_start))
+ tmp = pgprot_writecombine(tmp);
+ else
+ tmp = pgprot_noncached(tmp);
+#endif
+ return tmp;
+}
+
/**
* \c nopage method for AGP virtual memory.
@@ -59,7 +89,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
drm_device_t *dev = priv->head->dev;
drm_map_t *map = NULL;
drm_map_list_t *r_list;
- drm_hash_item_t *hash;
+ drm_hash_item_t *hash;
/*
* Find the right map
@@ -70,10 +100,10 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
if (!dev->agp || !dev->agp->cant_use_aperture)
goto vm_nopage_error;
- if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash))
- goto vm_nopage_error;
+ if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
+ goto vm_nopage_error;
- r_list = drm_hash_entry(hash, drm_map_list_t, hash);
+ r_list = drm_hash_entry(hash, drm_map_list_t, hash);
map = r_list->map;
if (map && map->type == _DRM_AGP) {
@@ -129,6 +159,95 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) || \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
+static
+#endif
+struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
+ struct fault_data *data)
+{
+ unsigned long address = data->address;
+ drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+ unsigned long page_offset;
+ struct page *page;
+ drm_ttm_t *ttm;
+ drm_buffer_manager_t *bm;
+ drm_device_t *dev;
+ unsigned long pfn;
+ int err;
+ pgprot_t pgprot;
+
+ if (!map) {
+ data->type = VM_FAULT_OOM;
+ return NULL;
+ }
+
+ if (address > vma->vm_end) {
+ data->type = VM_FAULT_SIGBUS;
+ return NULL;
+ }
+
+ ttm = (drm_ttm_t *) map->offset;
+
+ dev = ttm->dev;
+
+ /*
+ * Perhaps retry here?
+ */
+
+ mutex_lock(&dev->struct_mutex);
+ drm_fixup_ttm_caching(ttm);
+
+ bm = &dev->bm;
+ page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+ page = ttm->pages[page_offset];
+
+ if (!page) {
+ if (drm_alloc_memctl(PAGE_SIZE)) {
+ data->type = VM_FAULT_OOM;
+ goto out;
+ }
+ page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
+ if (!page) {
+ drm_free_memctl(PAGE_SIZE);
+ data->type = VM_FAULT_OOM;
+ goto out;
+ }
+ ++bm->cur_pages;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+ SetPageLocked(page);
+#else
+ SetPageReserved(page);
+#endif
+ }
+
+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
+
+ /*
+ * FIXME: Check can't map aperture flag.
+ */
+
+ pfn = ttm->aper_offset + page_offset +
+ (ttm->be->aperture_base >> PAGE_SHIFT);
+ pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+ } else {
+ pfn = page_to_pfn(page);
+ pgprot = vma->vm_page_prot;
+ }
+
+ err = vm_insert_pfn(vma, address, pfn, pgprot);
+
+ if (!err || err == -EBUSY)
+ data->type = VM_FAULT_MINOR;
+ else
+ data->type = VM_FAULT_OOM;
+ out:
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+}
+#endif
+
/**
* \c nopage method for shared virtual memory.
*
@@ -198,7 +317,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
} else {
dev->vmalist = pt->next;
}
- drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+ drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
} else {
prev = pt;
}
@@ -243,6 +362,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
+ case _DRM_TTM:
+ BUG_ON(1);
+ break;
}
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}
@@ -358,6 +480,7 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
return drm_do_vm_sg_nopage(vma, address);
}
+
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
@@ -414,6 +537,20 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
+static struct vm_operations_struct drm_vm_ttm_ops = {
+ .nopage = drm_vm_ttm_nopage,
+ .open = drm_vm_ttm_open_wrapper,
+ .close = drm_vm_ttm_close,
+};
+#else
+static struct vm_operations_struct drm_vm_ttm_ops = {
+ .fault = drm_vm_ttm_fault,
+ .open = drm_vm_ttm_open_wrapper,
+ .close = drm_vm_ttm_close,
+};
+#endif
+
/**
* \c open method for shared virtual memory.
*
@@ -432,7 +569,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_inc(&dev->vma_count);
- vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
+ vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
mutex_lock(&dev->struct_mutex);
vma_entry->vma = vma;
@@ -443,6 +580,29 @@ static void drm_vm_open(struct vm_area_struct *vma)
}
}
+static int drm_vm_ttm_open(struct vm_area_struct *vma) {
+
+ drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
+ drm_ttm_t *ttm;
+ drm_file_t *priv = vma->vm_file->private_data;
+ drm_device_t *dev = priv->head->dev;
+
+ drm_vm_open(vma);
+ mutex_lock(&dev->struct_mutex);
+ ttm = (drm_ttm_t *) map->offset;
+ atomic_inc(&ttm->vma_count);
+#ifdef DRM_ODD_MM_COMPAT
+ drm_ttm_add_vma(ttm, vma);
+#endif
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
+{
+ drm_vm_ttm_open(vma);
+}
+
/**
* \c close method for all virtual memory types.
*
@@ -469,13 +629,42 @@ static void drm_vm_close(struct vm_area_struct *vma)
} else {
dev->vmalist = pt->next;
}
- drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+ drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
}
mutex_unlock(&dev->struct_mutex);
}
+
+static void drm_vm_ttm_close(struct vm_area_struct *vma)
+{
+ drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+ drm_ttm_t *ttm;
+ drm_device_t *dev;
+ int ret;
+
+ drm_vm_close(vma);
+ if (map) {
+ ttm = (drm_ttm_t *) map->offset;
+ dev = ttm->dev;
+ mutex_lock(&dev->struct_mutex);
+#ifdef DRM_ODD_MM_COMPAT
+ drm_ttm_delete_vma(ttm, vma);
+#endif
+ if (atomic_dec_and_test(&ttm->vma_count)) {
+ if (ttm->destroy) {
+ ret = drm_destroy_ttm(ttm);
+ BUG_ON(ret);
+ drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+ return;
+}
+
+
/**
* mmap DMA memory.
*
@@ -496,8 +685,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
lock_kernel();
dev = priv->head->dev;
dma = dev->dma;
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, VM_OFFSET(vma));
+ DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+ vma->vm_start, vma->vm_end, vma->vm_pgoff);
/* Length must match exact page count */
if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
@@ -506,6 +695,22 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
}
unlock_kernel();
+ if (!capable(CAP_SYS_ADMIN) &&
+ (dma->flags & _DRM_DMA_USE_PCI_RO)) {
+ vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+ pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+ /* Ye gads this is ugly. With more thought
+ we could move this up higher and use
+ `protection_map' instead. */
+ vma->vm_page_prot =
+ __pgprot(pte_val
+ (pte_wrprotect
+ (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+ }
+
vma->vm_ops = &drm_vm_dma_ops;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
@@ -554,10 +759,10 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
drm_device_t *dev = priv->head->dev;
drm_map_t *map = NULL;
unsigned long offset = 0;
- drm_hash_item_t *hash;
+ drm_hash_item_t *hash;
- DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
- vma->vm_start, vma->vm_end, VM_OFFSET(vma));
+ DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+ vma->vm_start, vma->vm_end, vma->vm_pgoff);
if (!priv->authenticated)
return -EACCES;
@@ -566,7 +771,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
* the AGP mapped at physical address 0
* --BenH.
*/
- if (!VM_OFFSET(vma)
+ if (!vma->vm_pgoff
#if __OS_HAS_AGP
&& (!dev->agp
|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
@@ -574,11 +779,11 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
)
return drm_mmap_dma(filp, vma);
- if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash)) {
- DRM_ERROR("Could not find map\n");
+ if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff , &hash)) {
+ DRM_ERROR("Could not find map\n");
return -EINVAL;
- }
-
+ }
+
map = drm_hash_entry(hash,drm_map_list_t, hash)->map;
if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
@@ -620,27 +825,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
/* fall through to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
-#if defined(__i386__) || defined(__x86_64__)
- if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
- pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
- }
-#elif defined(__powerpc__)
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
- if (map->type == _DRM_REGISTERS)
- pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
-#endif
- vma->vm_flags |= VM_IO; /* not in core dump */
-#if defined(__ia64__)
- if (efi_range_is_wc(vma->vm_start, vma->vm_end -
- vma->vm_start))
- vma->vm_page_prot =
- pgprot_writecombine(vma->vm_page_prot);
- else
- vma->vm_page_prot =
- pgprot_noncached(vma->vm_page_prot);
-#endif
offset = dev->driver->get_reg_ofs(dev);
+ vma->vm_flags |= VM_IO; /* not in core dump */
+ vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >>PAGE_SHIFT,
@@ -687,6 +874,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED;
#endif
break;
+ case _DRM_TTM: {
+ vma->vm_ops = &drm_vm_ttm_ops;
+ vma->vm_private_data = (void *) map;
+ vma->vm_file = filp;
+ vma->vm_flags |= VM_RESERVED | VM_IO;
+#ifdef DRM_ODD_MM_COMPAT
+ mutex_lock(&dev->struct_mutex);
+ drm_ttm_map_bound(vma);
+ mutex_unlock(&dev->struct_mutex);
+#endif
+ if (drm_vm_ttm_open(vma))
+ return -EAGAIN;
+ return 0;
+ }
default:
return -EINVAL; /* This should never happen. */
}
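The new _DRM_TTM branch is reached through the same map hash as every other map type: drm_ttm_object_create() stored the object's user_token (a page-aligned fake offset), and drm_mmap() resolves vma->vm_pgoff back to the map before installing drm_vm_ttm_ops. A hedged user-space sketch, assuming the token and size were already obtained through whatever driver ioctl exposes them:

#include <sys/mman.h>
#include <stddef.h>

/* Sketch only: map a ttm object given its user_token; faults are then
 * served by drm_vm_ttm_fault()/drm_vm_ttm_nopage() in the kernel. */
static void *map_ttm_object(int drm_fd, unsigned long long user_token, size_t size)
{
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 drm_fd, (off_t) user_token);

	return (ptr == MAP_FAILED) ? NULL : ptr;
}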
diff --git a/linux-core/ffb_drv.c b/linux-core/ffb_drv.c
index 7b028c86..9c88f061 100644
--- a/linux-core/ffb_drv.c
+++ b/linux-core/ffb_drv.c
@@ -4,7 +4,6 @@
* Copyright (C) 2000 David S. Miller (davem@redhat.com)
*/
-#include <linux/config.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <asm/shmparam.h>
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index 800c5f61..bdbb31fa 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -151,7 +151,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
drm_device_t *dev = priv->head->dev;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_private_t *dev_priv = dev->dev_private;
- struct file_operations *old_fops;
+ const struct file_operations *old_fops;
int retcode = 0;
if (buf_priv->currently_mapped == I810_BUF_MAPPED)
@@ -166,10 +166,10 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
MAP_SHARED, buf->bus_address);
dev_priv->mmap_buffer = NULL;
filp->f_op = old_fops;
- if ((unsigned long)buf_priv->virtual > -1024UL) {
+ if (IS_ERR(buf_priv->virtual)) {
/* Real error */
DRM_ERROR("mmap error\n");
- retcode = (signed int)buf_priv->virtual;
+ retcode = PTR_ERR(buf_priv->virtual);
buf_priv->virtual = NULL;
}
up_write(&current->mm->mmap_sem);
@@ -833,7 +833,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev,
((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
if (used & 4) {
- *(u32 *) ((u32) buf_priv->kernel_virtual + used) = 0;
+ *(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
used += 4;
}
@@ -1191,7 +1191,7 @@ static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used,
if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
if (used & 4) {
- *(u32 *) ((u32) buf_priv->virtual + used) = 0;
+ *(u32 *) ((char *) buf_priv->virtual + used) = 0;
used += 4;
}
diff --git a/linux-core/i810_drv.c b/linux-core/i810_drv.c
index d4b73760..fc784a02 100644
--- a/linux-core/i810_drv.c
+++ b/linux-core/i810_drv.c
@@ -30,7 +30,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"
diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h
index e8cf3ff6..bb7358d2 100644
--- a/linux-core/i810_drv.h
+++ b/linux-core/i810_drv.h
@@ -141,8 +141,8 @@ extern int i810_max_ioctl;
volatile char *virt;
#define BEGIN_LP_RING(n) do { \
- if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__); \
+ if (I810_VERBOSE) \
+ DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__);\
if (dev_priv->ring.space < n*4) \
i810_wait_ring(dev, n*4); \
dev_priv->ring.space -= n*4; \
@@ -151,17 +151,17 @@ extern int i810_max_ioctl;
virt = dev_priv->ring.virtual_start; \
} while (0)
-#define ADVANCE_LP_RING() do { \
+#define ADVANCE_LP_RING() do { \
if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
- dev_priv->ring.tail = outring; \
- I810_WRITE(LP_RING + RING_TAIL, outring); \
+ dev_priv->ring.tail = outring; \
+ I810_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
-#define OUT_RING(n) do { \
+#define OUT_RING(n) do { \
if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outring += 4; \
- outring &= ringmask; \
+ *(volatile unsigned int *)(virt + outring) = n; \
+ outring += 4; \
+ outring &= ringmask; \
} while (0)
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
@@ -193,7 +193,7 @@ extern int i810_max_ioctl;
#define HEAD_WRAP_ONE 0x00200000
#define HEAD_ADDR 0x001FFFFC
#define RING_START 0x08
-#define START_ADDR 0x00FFFFF8
+#define START_ADDR 0x00FFFFF8
#define RING_LEN 0x0C
#define RING_NR_PAGES 0x000FF000
#define RING_REPORT_MASK 0x00000006
diff --git a/linux-core/i830_dma.c b/linux-core/i830_dma.c
index a122898b..4526ccf1 100644
--- a/linux-core/i830_dma.c
+++ b/linux-core/i830_dma.c
@@ -137,7 +137,7 @@ static int i830_map_buffer(drm_buf_t * buf, struct file *filp)
drm_device_t *dev = priv->head->dev;
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
drm_i830_private_t *dev_priv = dev->dev_private;
- struct file_operations *old_fops;
+ const struct file_operations *old_fops;
unsigned long virtual;
int retcode = 0;
@@ -155,7 +155,7 @@ static int i830_map_buffer(drm_buf_t * buf, struct file *filp)
if (IS_ERR((void *)virtual)) { /* ugh */
/* Real error */
DRM_ERROR("mmap error\n");
- retcode = virtual;
+ retcode = PTR_ERR((void *)virtual);
buf_priv->virtual = NULL;
} else {
buf_priv->virtual = (void __user *)virtual;
diff --git a/linux-core/i830_drv.c b/linux-core/i830_drv.c
index 74b574aa..6416161e 100644
--- a/linux-core/i830_drv.c
+++ b/linux-core/i830_drv.c
@@ -32,8 +32,6 @@
* Keith Whitwell <keith@tungstengraphics.com>
*/
-#include <linux/config.h>
-
#include "drmP.h"
#include "drm.h"
#include "i830_drm.h"
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
new file mode 100644
index 00000000..c3e54468
--- /dev/null
+++ b/linux-core/i915_buffer.c
@@ -0,0 +1,66 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+
+drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
+{
+ return drm_agp_init_ttm(dev, NULL);
+}
+
+int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
+{
+ *class = 0;
+ if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ *type = 3;
+ else
+ *type = 1;
+ return 0;
+}
+
+int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
+{
+ /*
+ * FIXME: Only emit once per batchbuffer submission.
+ */
+
+ uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
+
+ if (flags & DRM_BO_FLAG_READ)
+ flush_cmd |= MI_READ_FLUSH;
+ if (flags & DRM_BO_FLAG_EXE)
+ flush_cmd |= MI_EXE_FLUSH;
+
+ return i915_emit_mi_flush(dev, flush_cmd);
+}
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index c6e25f9b..2c5b43d0 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -38,6 +38,27 @@ static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
+#ifdef I915_HAVE_FENCE
+static drm_fence_driver_t i915_fence_driver = {
+ .no_types = 2,
+ .wrap_diff = (1 << 30),
+ .flush_diff = (1 << 29),
+ .sequence_mask = 0xffffffffU,
+ .lazy_capable = 1,
+ .emit = i915_fence_emit_sequence,
+ .poke_flush = i915_poke_flush,
+};
+#endif
+#ifdef I915_HAVE_BUFFER
+static drm_bo_driver_t i915_bo_driver = {
+ .iomap = {NULL, NULL},
+ .cached = {1, 1},
+ .create_ttm_backend_entry = i915_create_ttm_backend_entry,
+ .fence_type = i915_fence_types,
+ .invalidate_caches = i915_invalidate_caches
+};
+#endif
+
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
@@ -45,12 +66,14 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
+ DRIVER_IRQ_VBL2,
.load = i915_driver_load,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.device_is_agp = i915_driver_device_is_agp,
.vblank_wait = i915_driver_vblank_wait,
+ .vblank_wait2 = i915_driver_vblank_wait2,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
@@ -77,7 +100,12 @@ static struct drm_driver driver = {
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
},
-
+#ifdef I915_HAVE_FENCE
+ .fence_driver = &i915_fence_driver,
+#endif
+#ifdef I915_HAVE_BUFFER
+ .bo_driver = &i915_bo_driver,
+#endif
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
new file mode 100644
index 00000000..2182604c
--- /dev/null
+++ b/linux-core/i915_fence.c
@@ -0,0 +1,146 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/*
+ * Implements an intel sync flush operation.
+ */
+
+static void i915_perform_flush(drm_device_t * dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_fence_manager_t *fm = &dev->fm;
+ drm_fence_driver_t *driver = dev->driver->fence_driver;
+ uint32_t flush_flags = 0;
+ uint32_t flush_sequence = 0;
+ uint32_t i_status;
+ uint32_t diff;
+ uint32_t sequence;
+
+ if (!dev_priv)
+ return;
+
+ if (fm->pending_exe_flush) {
+ sequence = READ_BREADCRUMB(dev_priv);
+
+ /*
+ * First update fences with the current breadcrumb.
+ */
+
+ diff = sequence - fm->last_exe_flush;
+ if (diff < driver->wrap_diff && diff != 0) {
+ drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
+ }
+
+ diff = sequence - fm->exe_flush_sequence;
+ if (diff < driver->wrap_diff) {
+ fm->pending_exe_flush = 0;
+ if (dev_priv->fence_irq_on) {
+ i915_user_irq_off(dev_priv);
+ dev_priv->fence_irq_on = 0;
+ }
+ } else if (!dev_priv->fence_irq_on) {
+ i915_user_irq_on(dev_priv);
+ dev_priv->fence_irq_on = 1;
+ }
+ }
+
+ if (dev_priv->flush_pending) {
+ i_status = READ_HWSP(dev_priv, 0);
+ if ((i_status & (1 << 12)) !=
+ (dev_priv->saved_flush_status & (1 << 12))) {
+ flush_flags = dev_priv->flush_flags;
+ flush_sequence = dev_priv->flush_sequence;
+ dev_priv->flush_pending = 0;
+ drm_fence_handler(dev, flush_sequence, flush_flags);
+ }
+ }
+
+ if (fm->pending_flush && !dev_priv->flush_pending) {
+ dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
+ dev_priv->flush_flags = fm->pending_flush;
+ dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
+ I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));
+ dev_priv->flush_pending = 1;
+ fm->pending_flush = 0;
+ }
+
+ if (dev_priv->flush_pending) {
+ i_status = READ_HWSP(dev_priv, 0);
+ if ((i_status & (1 << 12)) !=
+ (dev_priv->saved_flush_status & (1 << 12))) {
+ flush_flags = dev_priv->flush_flags;
+ flush_sequence = dev_priv->flush_sequence;
+ dev_priv->flush_pending = 0;
+ drm_fence_handler(dev, flush_sequence, flush_flags);
+ }
+ }
+
+}
+
+void i915_poke_flush(drm_device_t * dev)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+ unsigned long flags;
+
+ write_lock_irqsave(&fm->lock, flags);
+ i915_perform_flush(dev);
+ write_unlock_irqrestore(&fm->lock, flags);
+}
+
+int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
+ uint32_t * sequence, uint32_t * native_type)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ if (!dev_priv)
+ return -EINVAL;
+
+ i915_emit_irq(dev);
+ *sequence = (uint32_t) dev_priv->counter;
+ *native_type = DRM_FENCE_TYPE_EXE;
+ if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
+ *native_type |= DRM_I915_FENCE_TYPE_RW;
+
+ return 0;
+}
+
+void i915_fence_handler(drm_device_t * dev)
+{
+ drm_fence_manager_t *fm = &dev->fm;
+
+ write_lock(&fm->lock);
+ i915_perform_flush(dev);
+ write_unlock(&fm->lock);
+}
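Both diff < driver->wrap_diff tests in i915_perform_flush() are the usual unsigned wrap-around ordering trick: the breadcrumb is a free-running 32-bit counter, so "sequence b has been reached" is decided by whether the unsigned difference stays below wrap_diff (1 << 30 for i915, per the fence driver declared above). A small self-contained illustration of the same comparison:

#include <stdint.h>

/*
 * Sketch of the wrap-safe test used above: nonzero if sequence 'a' is at or
 * past sequence 'b', assuming the two never drift more than 'wrap_diff' apart.
 * (i915_perform_flush additionally requires diff != 0 before signalling.)
 */
static int seq_reached(uint32_t a, uint32_t b, uint32_t wrap_diff)
{
	return (uint32_t)(a - b) < wrap_diff;
}

/* Example: seq_reached(2, 0xfffffffeU, 1U << 30) is true across the wrap. */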
diff --git a/linux-core/imagine_drv.c b/linux-core/imagine_drv.c
index bec2fae4..6d050999 100644
--- a/linux-core/imagine_drv.c
+++ b/linux-core/imagine_drv.c
@@ -22,7 +22,6 @@
/* derived from tdfx_drv.c */
-#include <linux/config.h>
#include "drmP.h"
#include "imagine_drv.h"
diff --git a/linux-core/mach64_drv.c b/linux-core/mach64_drv.c
index ba45132b..9709934d 100644
--- a/linux-core/mach64_drv.c
+++ b/linux-core/mach64_drv.c
@@ -27,7 +27,6 @@
* Leif Delgass <ldelgass@retinalburn.net>
*/
-#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "mach64_drm.h"
diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c
index 3a1e4b25..ef6f1e44 100644
--- a/linux-core/mga_drv.c
+++ b/linux-core/mga_drv.c
@@ -29,7 +29,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "mga_drm.h"
@@ -49,6 +48,7 @@ static struct drm_driver driver = {
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
+ .dev_priv_size = sizeof (drm_mga_buf_priv_t),
.load = mga_driver_load,
.unload = mga_driver_unload,
.lastclose = mga_driver_lastclose,
diff --git a/linux-core/nv_drv.c b/linux-core/nv_drv.c
index a6afb024..5049473a 100644
--- a/linux-core/nv_drv.c
+++ b/linux-core/nv_drv.c
@@ -32,7 +32,6 @@
* Lars Knoll <lars@trolltech.com>
*/
-#include <linux/config.h>
#include "drmP.h"
#include "nv_drv.h"
diff --git a/linux-core/r128_drv.c b/linux-core/r128_drv.c
index edc04b03..ef4a5cbd 100644
--- a/linux-core/r128_drv.c
+++ b/linux-core/r128_drv.c
@@ -29,7 +29,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c
index fca2d4e7..43b9aca0 100644
--- a/linux-core/radeon_drv.c
+++ b/linux-core/radeon_drv.c
@@ -29,7 +29,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/config.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
@@ -45,7 +44,7 @@ module_param_named(no_wb, radeon_no_wb, int, 0444);
static int dri_library_name(struct drm_device * dev, char * buf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- int family = dev_priv->flags & CHIP_FAMILY_MASK;
+ int family = dev_priv->flags & RADEON_FAMILY_MASK;
return snprintf(buf, PAGE_SIZE, "%s\n",
(family < CHIP_R200) ? "radeon" :
diff --git a/linux-core/savage_drv.c b/linux-core/savage_drv.c
index 9f12dfe2..bb3561e6 100644
--- a/linux-core/savage_drv.c
+++ b/linux-core/savage_drv.c
@@ -23,7 +23,6 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/config.h>
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c
index 3fdbd88e..9b0b9830 100644
--- a/linux-core/sis_drv.c
+++ b/linux-core/sis_drv.c
@@ -25,7 +25,6 @@
*
*/
-#include <linux/config.h>
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"
@@ -40,15 +39,15 @@ static struct pci_device_id pciidlist[] = {
static int sis_driver_load(drm_device_t *dev, unsigned long chipset)
{
drm_sis_private_t *dev_priv;
- int ret;
+ int ret;
dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
return DRM_ERR(ENOMEM);
dev->dev_private = (void *)dev_priv;
- dev_priv->chipset = chipset;
- ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
+ dev_priv->chipset = chipset;
+ ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
if (ret) {
drm_free(dev_priv, sizeof(dev_priv), DRM_MEM_DRIVER);
}
@@ -60,7 +59,7 @@ static int sis_driver_unload(drm_device_t *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sman_takedown(&dev_priv->sman);
+ drm_sman_takedown(&dev_priv->sman);
drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
return 0;
@@ -70,10 +69,10 @@ static int sis_driver_unload(drm_device_t *dev)
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
- .load = sis_driver_load,
- .unload = sis_driver_unload,
+ .load = sis_driver_load,
+ .unload = sis_driver_unload,
.context_dtor = NULL,
- .dma_quiescent = sis_idle,
+ .dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
.reclaim_buffers_locked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
diff --git a/linux-core/tdfx_drv.c b/linux-core/tdfx_drv.c
index ce1b7c5a..bc69c06a 100644
--- a/linux-core/tdfx_drv.c
+++ b/linux-core/tdfx_drv.c
@@ -30,7 +30,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <linux/config.h>
#include "drmP.h"
#include "tdfx_drv.h"
diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c
index 4a3a7524..fdc2bd67 100644
--- a/linux-core/via_dmablit.c
+++ b/linux-core/via_dmablit.c
@@ -121,19 +121,18 @@ via_map_blit_for_device(struct pci_dev *pdev,
while (line_len > 0) {
- remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
+ remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
line_len -= remaining_len;
if (mode == 1) {
- desc_ptr->mem_addr =
- dma_map_page(&pdev->dev,
- vsg->pages[VIA_PFN(cur_mem) -
- VIA_PFN(first_addr)],
- VIA_PGOFF(cur_mem), remaining_len,
- vsg->direction);
- desc_ptr->dev_addr = cur_fb;
+ desc_ptr->mem_addr = dma_map_page(&pdev->dev,
+ vsg->pages[VIA_PFN(cur_mem) -
+ VIA_PFN(first_addr)],
+ VIA_PGOFF(cur_mem), remaining_len,
+ vsg->direction);
+ desc_ptr->dev_addr = cur_fb;
- desc_ptr->size = remaining_len;
+ desc_ptr->size = remaining_len;
desc_ptr->next = (uint32_t) next;
next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
DMA_TO_DEVICE);
@@ -167,7 +166,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
*/
-void
+static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
struct page *page;
@@ -648,13 +647,13 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return DRM_ERR(EINVAL);
+ return DRM_ERR(EINVAL);
}
#else
if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return DRM_ERR(EINVAL);
+ return DRM_ERR(EINVAL);
}
#endif
@@ -732,7 +731,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_sg_info_t *vsg;
drm_via_blitq_t *blitq;
- int ret;
+ int ret;
int engine;
unsigned long irqsave;
diff --git a/linux-core/via_dmablit.h b/linux-core/via_dmablit.h
index 64863917..f6ae03ec 100644
--- a/linux-core/via_dmablit.h
+++ b/linux-core/via_dmablit.h
@@ -30,6 +30,8 @@
#ifndef _VIA_DMABLIT_H
#define _VIA_DMABLIT_H
+#include <linux/dma-mapping.h>
+
#define VIA_NUM_BLIT_ENGINES 2
#define VIA_NUM_BLIT_SLOTS 8
@@ -43,12 +45,12 @@ typedef struct _drm_via_sg_info {
int num_desc;
enum dma_data_direction direction;
unsigned char *bounce_buffer;
- dma_addr_t chain_start;
+ dma_addr_t chain_start;
uint32_t free_on_sequence;
- unsigned int descriptors_per_page;
+ unsigned int descriptors_per_page;
int aborted;
enum {
- dr_via_device_mapped,
+ dr_via_device_mapped,
dr_via_desc_pages_alloc,
dr_via_pages_locked,
dr_via_pages_alloc,
@@ -66,7 +68,7 @@ typedef struct _drm_via_blitq {
unsigned num_free;
unsigned num_outstanding;
unsigned long end;
- int aborting;
+ int aborting;
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
spinlock_t blit_lock;