Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/Makefile.kernel  |    6
-rw-r--r--  linux-core/drmP.h           |  467
-rw-r--r--  linux-core/drm_agpsupport.c |  158
-rw-r--r--  linux-core/drm_bo.c         | 1994
-rw-r--r--  linux-core/drm_bufs.c       |   11
-rw-r--r--  linux-core/drm_compat.c     |  421
-rw-r--r--  linux-core/drm_compat.h     |  145
-rw-r--r--  linux-core/drm_drv.c        |   88
-rw-r--r--  linux-core/drm_fence.c      |  619
-rw-r--r--  linux-core/drm_fops.c       |  147
-rw-r--r--  linux-core/drm_hashtab.c    |   43
-rw-r--r--  linux-core/drm_hashtab.h    |    1
-rw-r--r--  linux-core/drm_irq.c        |    1
-rw-r--r--  linux-core/drm_lock.c       |   83
-rw-r--r--  linux-core/drm_memory.c     |   69
-rw-r--r--  linux-core/drm_mm.c         |   33
-rw-r--r--  linux-core/drm_object.c     |  287
-rw-r--r--  linux-core/drm_proc.c       |   95
-rw-r--r--  linux-core/drm_sman.c       |    3
-rw-r--r--  linux-core/drm_stub.c       |   29
-rw-r--r--  linux-core/drm_ttm.c        |  498
-rw-r--r--  linux-core/drm_ttm.h        |  145
-rw-r--r--  linux-core/drm_vm.c         |  243
-rw-r--r--  linux-core/i915_buffer.c    |   66
-rw-r--r--  linux-core/i915_drv.c       |   28
-rw-r--r--  linux-core/i915_fence.c     |  146
26 files changed, 5678 insertions(+), 148 deletions(-)
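
The new core files (drm_bo.c, drm_fence.c, drm_ttm.c, drm_object.c) are hooked up through two driver-supplied tables, drm_bo_driver and drm_fence_driver, which the drmP.h hunk below adds to struct drm_driver. A minimal sketch of that hookup for a hypothetical "foo" driver follows; the foo_* callbacks and the numeric values are placeholders, not the actual i915_buffer.c / i915_fence.c code:

#include "drmP.h"

static drm_ttm_backend_t *foo_create_ttm_backend_entry(struct drm_device *dev)
{
	/* An AGP-based driver can simply reuse the generic AGP TTM backend. */
	return drm_agp_init_ttm(dev, NULL);
}

static int foo_fence_type(uint32_t flags, uint32_t *class, uint32_t *type)
{
	/* Only "command stream executed" fences in this sketch. */
	*class = 0;
	*type = DRM_FENCE_TYPE_EXE;
	return 0;
}

static int foo_invalidate_caches(struct drm_device *dev, uint32_t flags)
{
	/* Emit whatever flush is needed before CPU reads of GPU-written data. */
	return 0;
}

static int foo_fence_emit(struct drm_device *dev, uint32_t flags,
			  uint32_t *breadcrumb, uint32_t *native_type)
{
	/* Write a new sequence number + IRQ into the ring and report it. */
	*breadcrumb = 0;	/* would be the hardware sequence just emitted */
	*native_type = DRM_FENCE_TYPE_EXE;
	return 0;
}

static void foo_poke_flush(struct drm_device *dev)
{
	/* Make sure drm_fence_handler() gets called soon, e.g. enable the IRQ. */
}

static drm_fence_driver_t foo_fence_driver = {
	.no_types = 1,
	.wrap_diff = (1 << 30),		/* placeholder wrap/flush heuristics */
	.flush_diff = (1 << 29),
	.sequence_mask = 0xffffffffU,
	.lazy_capable = 1,
	.emit = foo_fence_emit,
	.poke_flush = foo_poke_flush,
};

static drm_bo_driver_t foo_bo_driver = {
	.cached = {[DRM_BO_MEM_LOCAL] = 1, [DRM_BO_MEM_TT] = 1},
	.iomap = {[DRM_BO_MEM_LOCAL] = NULL, [DRM_BO_MEM_TT] = NULL},
	.create_ttm_backend_entry = foo_create_ttm_backend_entry,
	.fence_type = foo_fence_type,
	.invalidate_caches = foo_invalidate_caches,
};

/* In the driver's struct drm_driver initializer:
 *	.fence_driver = &foo_fence_driver,
 *	.bo_driver = &foo_bo_driver,
 */

The core then reaches these tables through dev->driver->bo_driver (as in drm_move_local_to_tt() below) and dev->driver->fence_driver.
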
| diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 211e5b05..fba57ddf 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -12,13 +12,15 @@ drm-objs    := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \  		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \  		drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \  		drm_memory_debug.o ati_pcigart.o drm_sman.o \ -		drm_hashtab.o drm_mm.o +		drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ +	        drm_fence.o drm_ttm.o drm_bo.o  tdfx-objs   := tdfx_drv.o  r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o  mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o  i810-objs   := i810_drv.o i810_dma.o  i830-objs   := i830_drv.o i830_dma.o i830_irq.o -i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o +i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ +	i915_buffer.o  radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o  sis-objs    := sis_drv.o sis_mm.o  ffb-objs    := ffb_drv.o ffb_context.o diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 1b314be1..4ce5a3ec 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -83,6 +83,7 @@  #include <linux/poll.h>  #include <asm/pgalloc.h>  #include "drm.h" +#include <linux/slab.h>  #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))  #define __OS_HAS_MTRR (defined(CONFIG_MTRR)) @@ -154,9 +155,18 @@  #define DRM_MEM_CTXLIST   21  #define DRM_MEM_MM        22  #define DRM_MEM_HASHTAB   23 +#define DRM_MEM_OBJECTS   24 +#define DRM_MEM_FENCE     25 +#define DRM_MEM_TTM       26 +#define DRM_MEM_BUFOBJ    27  #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)  #define DRM_MAP_HASH_OFFSET 0x10000000 +#define DRM_MAP_HASH_ORDER 12 +#define DRM_OBJECT_HASH_ORDER 12 +#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) +#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) +#define DRM_MM_INIT_MAX_PAGES 256  /*@}*/ @@ -387,6 +397,19 @@ typedef struct drm_buf_entry {  	drm_freelist_t freelist;  } drm_buf_entry_t; +/* + * This should be small enough to allow the use of kmalloc for hash tables + * instead of vmalloc. + */ + +#define DRM_FILE_HASH_ORDER 8 +typedef enum{ +	_DRM_REF_USE=0, +	_DRM_REF_TYPE1, +	_DRM_NO_REF_TYPES +} drm_ref_t; + +  /** File private data */  typedef struct drm_file {  	int authenticated; @@ -401,6 +424,18 @@ typedef struct drm_file {  	struct drm_head *head;  	int remove_auth_on_close;  	unsigned long lock_count; +	 +	/* +	 * The user object hash table is global and resides in the +	 * drm_device structure. We protect the lists and hash tables with the +	 * device struct_mutex. A bit coarse-grained but probably the best  +	 * option. 
+	 */ + +        struct list_head refd_objects; +	struct list_head user_objects; + +        drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES];  	void *driver_priv;  } drm_file_t; @@ -502,6 +537,26 @@ typedef struct drm_sigdata {  	drm_hw_lock_t *lock;  } drm_sigdata_t; + +/*  + * Generic memory manager structs + */ + +typedef struct drm_mm_node { +	struct list_head fl_entry; +	struct list_head ml_entry; +	int free; +	unsigned long start; +	unsigned long size; +        struct drm_mm *mm; +	void *private; +} drm_mm_node_t; + +typedef struct drm_mm { +	drm_mm_node_t root_node; +} drm_mm_t; + +  /**   * Mappings list   */ @@ -509,7 +564,8 @@ typedef struct drm_map_list {  	struct list_head head;		/**< list head */  	drm_hash_item_t hash;  	drm_map_t *map;			/**< mapping */ -	unsigned int user_token; +	drm_u64_t user_token; +        drm_mm_node_t *file_offset_node;  } drm_map_list_t;  typedef drm_map_t drm_local_map_t; @@ -542,22 +598,77 @@ typedef struct ati_pcigart_info {  	drm_local_map_t mapping;  } drm_ati_pcigart_info; -/*  - * Generic memory manager structs +/* + * User space objects and their references.   */ -typedef struct drm_mm_node { -	struct list_head fl_entry; -	struct list_head ml_entry; -	int free; -	unsigned long start; -	unsigned long size; -	void *private; -} drm_mm_node_t; +#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -typedef struct drm_mm { -	drm_mm_node_t root_node; -} drm_mm_t; +typedef enum { +		drm_fence_type, +		drm_buffer_type, +		drm_ttm_type + +		/* +		 * Add other user space object types here.  +		 */ + +} drm_object_type_t; + + + + +/* + * A user object is a structure that helps the drm give out user handles + * to kernel internal objects and to keep track of these objects so that  + * they can be destroyed, for example when the user space process exits. + * Designed to be accessible using a user space 32-bit handle.  + */ + +typedef struct drm_user_object{ +	drm_hash_item_t hash; +	struct list_head list; +	drm_object_type_t type; +        atomic_t refcount; +        int shareable; +        drm_file_t *owner; +	void (*ref_struct_locked) (drm_file_t *priv, struct drm_user_object *obj,  +				   drm_ref_t ref_action);  +	void (*unref)(drm_file_t *priv, struct drm_user_object *obj,  +		      drm_ref_t unref_action); +	void (*remove)(drm_file_t *priv, struct drm_user_object *obj); +} drm_user_object_t; + +/* + * A ref object is a structure which is used to + * keep track of references to user objects and to keep track of these + * references so that they can be destroyed for example when the user space + * process exits. Designed to be accessible using a pointer to the _user_ object. + */ + + +typedef struct drm_ref_object { +	drm_hash_item_t hash; +	struct list_head list; +	atomic_t refcount; +	drm_ref_t unref_action; +} drm_ref_object_t; + + +#include "drm_ttm.h" + +/* + * buffer object driver + */ + +typedef struct drm_bo_driver{ +	int cached[DRM_BO_MEM_TYPES]; +        drm_local_map_t *iomap[DRM_BO_MEM_TYPES]; +	drm_ttm_backend_t *(*create_ttm_backend_entry)  +		(struct drm_device *dev); +	int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type); +	int (*invalidate_caches)(struct drm_device *dev, uint32_t flags); +} drm_bo_driver_t;  /** @@ -565,6 +676,7 @@ typedef struct drm_mm {   * a family of cards. 
There will one drm_device for each card present   * in this family   */ +  struct drm_device;  struct drm_driver {  	int (*load) (struct drm_device *, unsigned long flags); @@ -611,6 +723,9 @@ struct drm_driver {  	unsigned long (*get_reg_ofs) (struct drm_device * dev);  	void (*set_version) (struct drm_device * dev, drm_set_version_t * sv); +        struct drm_fence_driver *fence_driver; +	struct drm_bo_driver *bo_driver; +          	int major;  	int minor;  	int patchlevel; @@ -640,6 +755,71 @@ typedef struct drm_head {  	struct class_device *dev_class;  } drm_head_t; +typedef struct drm_cache { + +	/* +	 * Memory caches +	 */ + +	kmem_cache_t *mm; +	kmem_cache_t *fence_object; +} drm_cache_t; + + + +typedef struct drm_fence_driver{ +	int no_types; +	uint32_t wrap_diff; +	uint32_t flush_diff; +        uint32_t sequence_mask; +        int lazy_capable; +	int (*emit) (struct drm_device *dev, uint32_t flags, +		     uint32_t *breadcrumb, +		     uint32_t *native_type); +	void (*poke_flush) (struct drm_device *dev); +} drm_fence_driver_t; + +#define _DRM_FENCE_TYPE_EXE 0x00 + +typedef struct drm_fence_manager{ +        int initialized; +	rwlock_t lock; + +	/* +	 * The list below should be maintained in sequence order and  +	 * access is protected by the above spinlock. +	 */ + +	struct list_head ring; +	struct list_head *fence_types[32]; +	volatile uint32_t pending_flush; +	wait_queue_head_t fence_queue; +	int pending_exe_flush; +	uint32_t last_exe_flush; +	uint32_t exe_flush_sequence; +        atomic_t count; +} drm_fence_manager_t; + +typedef struct drm_buffer_manager{ +	struct mutex init_mutex; +	int nice_mode; +	int initialized; +        drm_file_t *last_to_validate; +	int has_type[DRM_BO_MEM_TYPES]; +        int use_type[DRM_BO_MEM_TYPES]; +	drm_mm_t manager[DRM_BO_MEM_TYPES]; +	struct list_head lru[DRM_BO_MEM_TYPES]; +        struct list_head pinned[DRM_BO_MEM_TYPES]; +	struct list_head unfenced; +	struct list_head ddestroy; +        struct work_struct wq; +        uint32_t fence_type; +        unsigned long cur_pages; +        atomic_t count; +} drm_buffer_manager_t; + + +  /**   * DRM device structure. This structure represent a complete card that   * may contain multiple heads. 
@@ -686,7 +866,11 @@ typedef struct drm_device {  	/*@{ */  	drm_map_list_t *maplist;	/**< Linked list of regions */  	int map_count;			/**< Number of mappable regions */ -	drm_open_hash_t map_hash;       /**< User token hash table for maps */ +        drm_open_hash_t map_hash;       /**< User token hash table for maps */ +        drm_mm_t offset_manager;        /**< User token manager */ +        drm_open_hash_t object_hash;    /**< User token hash table for objects */ +        struct address_space *dev_mapping;  /**< For unmap_mapping_range() */ +        struct page *ttm_dummy_page;  	/** \name Context handle management */  	/*@{ */ @@ -773,6 +957,9 @@ typedef struct drm_device {  	unsigned int agp_buffer_token;  	drm_head_t primary;		/**< primary screen head */ +	drm_fence_manager_t fm; +	drm_buffer_manager_t bm; +    	/** \name Drawable information */  	/*@{ */  	spinlock_t drw_lock; @@ -783,6 +970,75 @@ typedef struct drm_device {  	/*@} */  } drm_device_t; +#if __OS_HAS_AGP +typedef struct drm_agp_ttm_priv { +	DRM_AGP_MEM *mem; +	struct agp_bridge_data *bridge; +	unsigned alloc_type; +	unsigned cached_type; +	unsigned uncached_type; +	int populated; +} drm_agp_ttm_priv; +#endif + +typedef struct drm_fence_object{ +	drm_user_object_t base; +        atomic_t usage; + +	/* +	 * The below three fields are protected by the fence manager spinlock. +	 */ + +	struct list_head ring; +        int class; +        uint32_t native_type; +	uint32_t type; +	uint32_t signaled; +	uint32_t sequence; +	uint32_t flush_mask; +	uint32_t submitted_flush; +} drm_fence_object_t; + + +typedef struct drm_buffer_object{ +	drm_device_t *dev; +	drm_user_object_t base; + +    /* +     * If there is a possibility that the usage variable is zero, +     * then dev->struct_mutext should be locked before incrementing it. 
+     */ + +	atomic_t usage; +	drm_ttm_object_t *ttm_object; +        drm_ttm_t *ttm; +	unsigned long num_pages; +        unsigned long buffer_start; +        drm_bo_type_t type; +        unsigned long offset; + +	atomic_t mapped; +	uint32_t flags; +	uint32_t mask; + +	drm_mm_node_t *node_ttm;    /* MM node for on-card RAM */ +	drm_mm_node_t *node_card;   /* MM node for ttm*/ +	struct list_head lru_ttm;   /* LRU for the ttm pages*/ +        struct list_head lru_card;  /* For memory types with on-card RAM */ +	struct list_head ddestroy; + +	uint32_t fence_type; +        uint32_t fence_class; +	drm_fence_object_t *fence; +        uint32_t priv_flags; +	wait_queue_head_t event_queue; +        struct mutex mutex; +} drm_buffer_object_t; + +#define _DRM_BO_FLAG_UNFENCED 0x00000001 +#define _DRM_BO_FLAG_EVICTED  0x00000002 + +  static __inline__ int drm_core_check_feature(struct drm_device *dev,  					     int feature)  { @@ -840,6 +1096,7 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,  #define drm_core_has_MTRR(dev) (0)  #endif +  /******************************************************************/  /** \name Internal function definitions */  /*@{*/ @@ -868,6 +1125,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);  extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);  extern unsigned long drm_core_get_map_ofs(drm_map_t * map);  extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); +extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);  				/* Memory management support (drm_memory.h) */  #include "drm_memory.h" @@ -883,6 +1141,14 @@ extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);  extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);  extern int drm_unbind_agp(DRM_AGP_MEM * handle); +extern void drm_free_memctl(size_t size); +extern int drm_alloc_memctl(size_t size); +extern void drm_query_memctl(drm_u64_t *cur_used, +			     drm_u64_t *low_threshold, +			     drm_u64_t *high_threshold);  +extern void drm_init_memctl(size_t low_threshold, +			    size_t high_threshold); +  				/* Misc. IOCTL support (drm_ioctl.h) */  extern int drm_irq_by_busid(struct inode *inode, struct file *filp,  			    unsigned int cmd, unsigned long arg); @@ -950,6 +1216,13 @@ extern int drm_unlock(struct inode *inode, struct file *filp,  extern int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context);  extern int drm_lock_free(drm_device_t * dev,  			 __volatile__ unsigned int *lock, unsigned int context); +/* + * These are exported to drivers so that they can implement fencing using + * DMA quiscent + idle. DMA quiescent usually requires the hardware lock.  
+ */ + +extern int drm_i_have_hw_lock(struct file *filp); +extern int drm_kernel_take_hw_lock(struct file *filp);  				/* Buffer management support (drm_bufs.h) */  extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); @@ -1035,7 +1308,8 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size  extern int drm_agp_free_memory(DRM_AGP_MEM * handle);  extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);  extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); - +extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, +					   drm_ttm_backend_t *backend);  				/* Stub support (drm_stub.h) */  extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,  		     struct drm_driver *driver); @@ -1044,6 +1318,7 @@ extern int drm_put_head(drm_head_t * head);  extern unsigned int drm_debug; /* 1 to enable debug output */  extern unsigned int drm_cards_limit;  extern drm_head_t **drm_heads; +extern drm_cache_t drm_cache;  extern struct drm_sysfs_class *drm_class;  extern struct proc_dir_entry *drm_proc_root; @@ -1087,11 +1362,117 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev);  extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size,  					       unsigned alignment); -extern void drm_mm_put_block(drm_mm_t *mm, drm_mm_node_t *cur); +extern void drm_mm_put_block(drm_mm_node_t *cur);  extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,   						unsigned alignment, int best_match);  extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);  extern void drm_mm_takedown(drm_mm_t *mm); +extern int drm_mm_clean(drm_mm_t *mm); +static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)  +{ +	return block->mm; +} +   + +/* + * User space object bookkeeping (drm_object.c) + */ + +/* + * Must be called with the struct_mutex held. + */ + +extern int drm_add_user_object(drm_file_t *priv, drm_user_object_t *item,  + +/* + * Must be called with the struct_mutex held. + */ +			       int shareable); +extern drm_user_object_t *drm_lookup_user_object(drm_file_t *priv, uint32_t key); + +/* + * Must be called with the struct_mutex held. + * If "item" has been obtained by a call to drm_lookup_user_object. You may not + * release the struct_mutex before calling drm_remove_ref_object. + * This function may temporarily release the struct_mutex. + */ + +extern int drm_remove_user_object(drm_file_t *priv, drm_user_object_t *item); + +/* + * Must be called with the struct_mutex held. May temporarily release it. + */ + +extern int drm_add_ref_object(drm_file_t *priv, drm_user_object_t *referenced_object, +			      drm_ref_t ref_action); + +/* + * Must be called with the struct_mutex held. + */ + +drm_ref_object_t *drm_lookup_ref_object(drm_file_t *priv,  +					drm_user_object_t *referenced_object, +					drm_ref_t ref_action); +/* + * Must be called with the struct_mutex held. + * If "item" has been obtained by a call to drm_lookup_ref_object. You may not + * release the struct_mutex before calling drm_remove_ref_object. + * This function may temporarily release the struct_mutex. 
+ */ + +extern void drm_remove_ref_object(drm_file_t *priv, drm_ref_object_t *item); +extern int drm_user_object_ref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type, +			       drm_user_object_t **object); +extern int drm_user_object_unref(drm_file_t *priv, uint32_t user_token, drm_object_type_t type); + + + +/* + * fence objects (drm_fence.c) + */ + +extern void drm_fence_handler(drm_device_t *dev, uint32_t breadcrumb, uint32_t type); +extern void drm_fence_manager_init(drm_device_t *dev); +extern void drm_fence_manager_takedown(drm_device_t *dev); +extern void drm_fence_flush_old(drm_device_t *dev, uint32_t sequence); +extern int drm_fence_object_flush(drm_device_t * dev, +				  volatile drm_fence_object_t * fence,  +				  uint32_t type); +extern int drm_fence_object_signaled(volatile drm_fence_object_t * fence,  +				     uint32_t type); +extern void drm_fence_usage_deref_locked(drm_device_t * dev, +					 drm_fence_object_t * fence); +extern void drm_fence_usage_deref_unlocked(drm_device_t * dev, +					 drm_fence_object_t * fence); +extern int drm_fence_object_wait(drm_device_t * dev,  +				 volatile drm_fence_object_t * fence, +				 int lazy, int ignore_signals, uint32_t mask); +extern int drm_fence_object_create(drm_device_t *dev, uint32_t type, +				   uint32_t fence_flags,  +				   drm_fence_object_t **c_fence); +extern int drm_fence_add_user_object(drm_file_t *priv,  +				     drm_fence_object_t *fence, +				     int shareable); + + + + + +extern int drm_fence_ioctl(DRM_IOCTL_ARGS); + +/* + * buffer objects (drm_bo.c) + */ + +extern int drm_bo_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_driver_finish(drm_device_t *dev); +extern int drm_bo_driver_init(drm_device_t *dev); +extern int drm_fence_buffer_objects(drm_file_t * priv, +				    struct list_head *list,  +				    uint32_t fence_flags, +				    drm_fence_object_t *fence, +				    drm_fence_object_t **used_fence);  /* Inline replacements for DRM_IOREMAP macros */ @@ -1163,6 +1544,58 @@ extern void *drm_alloc(size_t size, int area);  extern void drm_free(void *pt, size_t size, int area);  #endif +/* + * Accounting variants of standard calls. + */ + +static inline void *drm_ctl_alloc(size_t size, int area) +{ +	void *ret; +	if (drm_alloc_memctl(size)) +		return NULL; +	ret = drm_alloc(size, area); +	if (!ret) +		drm_free_memctl(size); +	return ret; +} + +static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area) +{ +	void *ret; + +	if (drm_alloc_memctl(nmemb*size)) +		return NULL; +	ret = drm_calloc(nmemb, size, area); +	if (!ret) +		drm_free_memctl(nmemb*size); +	return ret; +} + +static inline void drm_ctl_free(void *pt, size_t size, int area) +{ +	drm_free(pt, size, area); +	drm_free_memctl(size); +} + +static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size,  +					int flags) +{ +	void *ret; +	if (drm_alloc_memctl(size)) +		return NULL; +	ret = kmem_cache_alloc(cache, flags); +	if (!ret) +		drm_free_memctl(size); +	return ret; +} + +static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size, +				      void *obj) +{ +	kmem_cache_free(cache, obj); +	drm_free_memctl(size); +} +  /*@}*/  #endif				/* __KERNEL__ */ diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index dce27cdf..a5f1f9ee 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -552,4 +552,162 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)  	return agp_unbind_memory(handle);  } + + +/* + * AGP ttm backend interface. 
+ */ + +#ifndef AGP_USER_TYPES +#define AGP_USER_TYPES (1 << 16) +#define AGP_USER_MEMORY (AGP_USER_TYPES) +#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) +#endif + +static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) { +	return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); +} + + +static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,  +			    struct page **pages) { + +	drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private; +	struct page **cur_page, **last_page = pages + num_pages; +	DRM_AGP_MEM *mem; + +	if (drm_alloc_memctl(num_pages * sizeof(void *))) +		return -1; + +	DRM_DEBUG("drm_agp_populate_ttm\n"); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) +	mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type); +#else +	mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type); +#endif +	if (!mem) { +		drm_free_memctl(num_pages *sizeof(void *)); +		return -1; +	} + +	DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count); +	mem->page_count = 0; +	for (cur_page = pages; cur_page < last_page; ++cur_page) { +		mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page)); +	} +	agp_priv->mem = mem; +	return 0; +} + +static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,  +			    unsigned long offset, +			    int cached)  +{ +	drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private; +	DRM_AGP_MEM *mem = agp_priv->mem; +	int ret; + +	DRM_DEBUG("drm_agp_bind_ttm\n"); +	DRM_MASK_VAL(backend->flags, DRM_BE_FLAG_BOUND_CACHED, +		     (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0); +	mem->is_flushed = TRUE; +	mem->type = (cached) ? agp_priv->cached_type : agp_priv->uncached_type; +	ret = drm_agp_bind_memory(mem, offset); +	if (ret) { +		DRM_ERROR("AGP Bind memory failed\n"); +	} +	return ret; +} + +static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { + +	drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private; + +	DRM_DEBUG("drm_agp_unbind_ttm\n"); +	if (agp_priv->mem->is_bound) +		return drm_agp_unbind_memory(agp_priv->mem); +	else +		return 0; +} + +static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { + +	drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private; +	DRM_AGP_MEM *mem = agp_priv->mem; + +	DRM_DEBUG("drm_agp_clear_ttm\n"); +	if (mem) { +		unsigned long num_pages = mem->page_count; +		backend->unbind(backend); +		agp_free_memory(mem); +		drm_free_memctl(num_pages *sizeof(void *)); +	} + +	agp_priv->mem = NULL; +} + +static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { + +	drm_agp_ttm_priv *agp_priv;  +	 +	if (backend) { +		DRM_DEBUG("drm_agp_destroy_ttm\n"); +		agp_priv = (drm_agp_ttm_priv *) backend->private; +		if (agp_priv) { +			if (agp_priv->mem) { +				backend->clear(backend); +			} +			drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS); +			backend->private = NULL; +		} +		if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) { +			drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);                      +		} +	} +} +	 + +drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, +				    drm_ttm_backend_t *backend) +{ + +        drm_ttm_backend_t *agp_be; +	drm_agp_ttm_priv *agp_priv; + +	agp_be = (backend != NULL) ? 
backend: +		drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS); + +	if (!agp_be) +		return NULL; +	 +	agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS); +	 +	if (!agp_priv) { +		drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS); +		return NULL; +	} +	 +	agp_priv->mem = NULL; +	agp_priv->alloc_type = AGP_USER_MEMORY; +	agp_priv->cached_type = AGP_USER_CACHED_MEMORY; +	agp_priv->uncached_type = AGP_USER_MEMORY; +	agp_priv->bridge = dev->agp->bridge; +	agp_priv->populated = FALSE; +	agp_be->aperture_base = dev->agp->agp_info.aper_base; +	agp_be->private = (void *) agp_priv; +	agp_be->needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust; +	agp_be->populate = drm_agp_populate; +	agp_be->clear = drm_agp_clear_ttm; +	agp_be->bind = drm_agp_bind_ttm; +	agp_be->unbind = drm_agp_unbind_ttm; +	agp_be->destroy = drm_agp_destroy_ttm; +	DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_NEEDS_FREE, +		     (backend == NULL) ? DRM_BE_FLAG_NEEDS_FREE : 0); +	DRM_MASK_VAL(agp_be->flags, DRM_BE_FLAG_CBA, +		     (dev->agp->cant_use_aperture) ? DRM_BE_FLAG_CBA : 0); +	agp_be->drm_map_type = _DRM_AGP; +	return agp_be; +} +EXPORT_SYMBOL(drm_agp_init_ttm); +  #endif				/* __OS_HAS_AGP */ diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c new file mode 100644 index 00000000..954b7a03 --- /dev/null +++ b/linux-core/drm_bo.c @@ -0,0 +1,1994 @@ +/************************************************************************** + *  + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + *  + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + *  + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + *  + *  + **************************************************************************/ +/* + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + +#include "drmP.h" + +/* + * Buffer object locking policy: + * Lock dev->struct_mutex; + * Increase usage + * Unlock dev->struct_mutex; + * Lock buffer->mutex; + * Do whatever you want; + * Unlock buffer->mutex; + * Decrease usage. Call destruction if zero. + * + * User object visibility ups usage just once, since it has its own  + * refcounting. + * + * Destruction: + * lock dev->struct_mutex; + * Verify that usage is zero. Otherwise unlock and continue. + * Destroy object. + * unlock dev->struct_mutex; + * + * Mutex and spinlock locking orders: + * 1.) Buffer mutex + * 2.) Refer to ttm locking orders. 
+ */ + +#define DRM_FLAG_MASKED(_old, _new, _mask) {\ +(_old) ^= (((_old) ^ (_new)) & (_mask)); \ +} + +static inline uint32_t drm_bo_type_flags(unsigned type) +{ +	return (1 << (24 + type)); +} + +static inline drm_buffer_object_t *drm_bo_entry(struct list_head *list, +						unsigned type) +{ +	switch (type) { +	case DRM_BO_MEM_LOCAL: +	case DRM_BO_MEM_TT: +		return list_entry(list, drm_buffer_object_t, lru_ttm); +	case DRM_BO_MEM_VRAM: +	case DRM_BO_MEM_VRAM_NM: +		return list_entry(list, drm_buffer_object_t, lru_card); +	default: +		BUG_ON(1); +	} +	return NULL; +} + +static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t * bo, +					    unsigned type) +{ +	switch (type) { +	case DRM_BO_MEM_LOCAL: +	case DRM_BO_MEM_TT: +		return bo->node_ttm; +	case DRM_BO_MEM_VRAM: +	case DRM_BO_MEM_VRAM_NM: +		return bo->node_card; +	default: +		BUG_ON(1); +	} +	return NULL; +} + +/* + * bo locked. dev->struct_mutex locked. + */ + +static void drm_bo_add_to_lru(drm_buffer_object_t * buf, +			      drm_buffer_manager_t * bm) +{ +	struct list_head *list; +	unsigned mem_type; + +	if (buf->flags & DRM_BO_FLAG_MEM_TT) { +		mem_type = DRM_BO_MEM_TT; +		list = +		    (buf-> +		     flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? +		    &bm->pinned[mem_type] : &bm->lru[mem_type]; +		list_add_tail(&buf->lru_ttm, list); +	} else { +		mem_type = DRM_BO_MEM_LOCAL; +		list = +		    (buf-> +		     flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? +		    &bm->pinned[mem_type] : &bm->lru[mem_type]; +		list_add_tail(&buf->lru_ttm, list); +	} +	if (buf->flags & DRM_BO_FLAG_MEM_VRAM) { +		mem_type = DRM_BO_MEM_VRAM; +		list = +		    (buf-> +		     flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? +		    &bm->pinned[mem_type] : &bm->lru[mem_type]; +		list_add_tail(&buf->lru_card, list); +	} +} + +/* + * bo locked. + */ + +static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict, +				int force_no_move) +{ +	drm_device_t *dev = buf->dev; +	int ret; + +	if (buf->node_ttm) { +		mutex_lock(&dev->struct_mutex); +		if (evict) +			ret = drm_evict_ttm(buf->ttm); +		else +			ret = drm_unbind_ttm(buf->ttm); + +		if (ret) { +			mutex_unlock(&dev->struct_mutex); +			if (ret == -EAGAIN) +				schedule(); +			return ret; +		} + +		if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) { +			drm_mm_put_block(buf->node_ttm); +			buf->node_ttm = NULL; +		} +		mutex_unlock(&dev->struct_mutex); +	} + +	buf->flags &= ~DRM_BO_FLAG_MEM_TT; +	buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; + +	return 0; +} + +/* + * Lock dev->struct_mutex + */ + +static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo) +{ + +	drm_buffer_manager_t *bm = &dev->bm; + +	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + +	/* +	 * Somone might try to access us through the still active BM lists. +	 */ + +	if (atomic_read(&bo->usage) != 0) +		return; +	if (!list_empty(&bo->ddestroy)) +		return; + +	if (bo->fence) { +		if (!drm_fence_object_signaled(bo->fence, bo->fence_type)) { + +			drm_fence_object_flush(dev, bo->fence, bo->fence_type); +			list_add_tail(&bo->ddestroy, &bm->ddestroy); +			schedule_delayed_work(&bm->wq, +					      ((DRM_HZ / 100) < +					       1) ? 1 : DRM_HZ / 100); +			return; +		} else { +			drm_fence_usage_deref_locked(dev, bo->fence); +			bo->fence = NULL; +		} +	} +	/* +	 * Take away from lru lists. 
+	 */ + +	list_del_init(&bo->lru_ttm); +	list_del_init(&bo->lru_card); + +	if (bo->ttm) { +		unsigned long _end = jiffies + DRM_HZ; +		int ret; + +		/* +		 * This temporarily unlocks struct_mutex.  +		 */ + +		do { +			ret = drm_unbind_ttm(bo->ttm); +			if (ret == -EAGAIN) { +				mutex_unlock(&dev->struct_mutex); +				schedule(); +				mutex_lock(&dev->struct_mutex); +			} +		} while (ret == -EAGAIN && !time_after_eq(jiffies, _end)); + +		if (ret) { +			DRM_ERROR("Couldn't unbind buffer. " +				  "Bad. Continuing anyway\n"); +		} +	} + +	if (bo->node_ttm) { +		drm_mm_put_block(bo->node_ttm); +		bo->node_ttm = NULL; +	} +	if (bo->node_card) { +		drm_mm_put_block(bo->node_card); +		bo->node_card = NULL; +	} +	if (bo->ttm_object) { +		drm_ttm_object_deref_locked(dev, bo->ttm_object); +	} +	atomic_dec(&bm->count); +	drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); +} + +/* + * Call bo->mutex locked. + * Wait until the buffer is idle. + */ + +static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, +		       int no_wait) +{ + +	drm_fence_object_t *fence = bo->fence; +	int ret; + +	if (fence) { +		drm_device_t *dev = bo->dev; +		if (drm_fence_object_signaled(fence, bo->fence_type)) { +			drm_fence_usage_deref_unlocked(dev, fence); +			bo->fence = NULL; +			return 0; +		} +		if (no_wait) { +			return -EBUSY; +		} +		ret = +		    drm_fence_object_wait(dev, fence, lazy, ignore_signals, +					  bo->fence_type); +		if (ret) +			return ret; + +		drm_fence_usage_deref_unlocked(dev, fence); +		bo->fence = NULL; + +	} +	return 0; +} + +/* + * Call dev->struct_mutex locked. + */ + +static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) +{ +	drm_buffer_manager_t *bm = &dev->bm; + +	drm_buffer_object_t *entry, *nentry; +	struct list_head *list, *next; +	drm_fence_object_t *fence; + +	list_for_each_safe(list, next, &bm->ddestroy) { +		entry = list_entry(list, drm_buffer_object_t, ddestroy); +		atomic_inc(&entry->usage); +		if (atomic_read(&entry->usage) != 1) { +			atomic_dec(&entry->usage); +			continue; +		} + +		nentry = NULL; +		if (next != &bm->ddestroy) { +			nentry = list_entry(next, drm_buffer_object_t, +					    ddestroy); +			atomic_inc(&nentry->usage); +		} + +		mutex_unlock(&dev->struct_mutex); +		mutex_lock(&entry->mutex); +		fence = entry->fence; +		if (fence && drm_fence_object_signaled(fence, +						       entry->fence_type)) { +			drm_fence_usage_deref_locked(dev, fence); +			entry->fence = NULL; +		} + +		if (entry->fence && remove_all) { +			if (bm->nice_mode) { +				unsigned long _end = jiffies + 3 * DRM_HZ; +				int ret; +				do { +					ret = drm_bo_wait(entry, 0, 1, 0); +				} while (ret && !time_after_eq(jiffies, _end)); + +				if (entry->fence) { +					bm->nice_mode = 0; +					DRM_ERROR("Detected GPU lockup or " +						  "fence driver was taken down. 
" +						  "Evicting waiting buffers.\n"); +				} +			} +			if (entry->fence) { +				drm_fence_usage_deref_unlocked(dev, +							       entry->fence); +				entry->fence = NULL; +			} +		} +		mutex_lock(&dev->struct_mutex); +		mutex_unlock(&entry->mutex); +		if (atomic_dec_and_test(&entry->usage) && (!entry->fence)) { +			list_del_init(&entry->ddestroy); +			drm_bo_destroy_locked(dev, entry); +		} +		if (nentry) { +			atomic_dec(&nentry->usage); +		} +	} + +} + +static void drm_bo_delayed_workqueue(void *data) +{ +	drm_device_t *dev = (drm_device_t *) data; +	drm_buffer_manager_t *bm = &dev->bm; + +	DRM_DEBUG("Delayed delete Worker\n"); + +	mutex_lock(&dev->struct_mutex); +	if (!bm->initialized) { +		mutex_unlock(&dev->struct_mutex); +		return; +	} +	drm_bo_delayed_delete(dev, 0); +	if (bm->initialized && !list_empty(&bm->ddestroy)) { +		schedule_delayed_work(&bm->wq, +				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100); +	} +	mutex_unlock(&dev->struct_mutex); +} + +void drm_bo_usage_deref_locked(drm_device_t * dev, drm_buffer_object_t * bo) +{ +	if (atomic_dec_and_test(&bo->usage)) { +		drm_bo_destroy_locked(dev, bo); +	} +} + +static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) +{ +	drm_bo_usage_deref_locked(priv->head->dev, +				  drm_user_object_entry(uo, drm_buffer_object_t, +							base)); +} + +void drm_bo_usage_deref_unlocked(drm_device_t * dev, drm_buffer_object_t * bo) +{ +	if (atomic_dec_and_test(&bo->usage)) { +		mutex_lock(&dev->struct_mutex); +		if (atomic_read(&bo->usage) == 0) +			drm_bo_destroy_locked(dev, bo); +		mutex_unlock(&dev->struct_mutex); +	} +} + +/* + * Note. The caller has to register (if applicable)  + * and deregister fence object usage. + */ + +int drm_fence_buffer_objects(drm_file_t * priv, +			     struct list_head *list, +			     uint32_t fence_flags, +			     drm_fence_object_t * fence, +			     drm_fence_object_t ** used_fence) +{ +	drm_device_t *dev = priv->head->dev; +	drm_buffer_manager_t *bm = &dev->bm; + +	drm_buffer_object_t *entry; +	uint32_t fence_type = 0; +	int count = 0; +	int ret = 0; +	struct list_head f_list, *l; + +	mutex_lock(&dev->struct_mutex); + +	if (!list) +		list = &bm->unfenced; + +	list_for_each_entry(entry, list, lru_ttm) { +		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); +		fence_type |= entry->fence_type; +		if (entry->fence_class != 0) { +			DRM_ERROR("Fence class %d is not implemented yet.\n", +				  entry->fence_class); +			ret = -EINVAL; +			goto out; +		} +		count++; +	} + +	if (!count) { +		ret = -EINVAL; +		goto out; +	} + +	/* +	 * Transfer to a local list before we release the dev->struct_mutex; +	 * This is so we don't get any new unfenced objects while fencing  +	 * the ones we already have.. 
+	 */ + +	list_add_tail(&f_list, list); +	list_del_init(list); + +	if (fence) { +		if ((fence_type & fence->type) != fence_type) { +			DRM_ERROR("Given fence doesn't match buffers " +				  "on unfenced list.\n"); +			ret = -EINVAL; +			goto out; +		} +	} else { +		mutex_unlock(&dev->struct_mutex); +		ret = drm_fence_object_create(dev, fence_type, +					      fence_flags | DRM_FENCE_FLAG_EMIT, +					      &fence); +		mutex_lock(&dev->struct_mutex); +		if (ret) +			goto out; +	} + +	count = 0; +	l = f_list.next; +	while (l != &f_list) { +		entry = list_entry(l, drm_buffer_object_t, lru_ttm); +		atomic_inc(&entry->usage); +		mutex_unlock(&dev->struct_mutex); +		mutex_lock(&entry->mutex); +		mutex_lock(&dev->struct_mutex); +		list_del_init(l); +		list_del_init(&entry->lru_card); +		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { +			count++; +			if (entry->fence) +				drm_fence_usage_deref_locked(dev, entry->fence); +			entry->fence = fence; +			DRM_FLAG_MASKED(entry->priv_flags, 0, +					_DRM_BO_FLAG_UNFENCED); +			DRM_WAKEUP(&entry->event_queue); +			drm_bo_add_to_lru(entry, bm); +		} +		mutex_unlock(&entry->mutex); +		drm_bo_usage_deref_locked(dev, entry); +		l = f_list.next; +	} +	atomic_add(count, &fence->usage); +	DRM_DEBUG("Fenced %d buffers\n", count); +      out: +	mutex_unlock(&dev->struct_mutex); +	*used_fence = fence; +	return ret; +} + +EXPORT_SYMBOL(drm_fence_buffer_objects); + +/* + * bo->mutex locked  + */ + +static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, +			int no_wait, int force_no_move) +{ +	int ret = 0; +	drm_device_t *dev = bo->dev; +	drm_buffer_manager_t *bm = &dev->bm; + +	/* +	 * Someone might have modified the buffer before we took the buffer mutex. +	 */ + +	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) +		goto out; +	if (!(bo->flags & drm_bo_type_flags(mem_type))) +		goto out; + +	ret = drm_bo_wait(bo, 0, 0, no_wait); + +	if (ret) { +		if (ret != -EAGAIN) +			DRM_ERROR("Failed to expire fence before " +				  "buffer eviction.\n"); +		goto out; +	} + +	if (mem_type == DRM_BO_MEM_TT) { +		ret = drm_move_tt_to_local(bo, 1, force_no_move); +		if (ret) +			goto out; +		mutex_lock(&dev->struct_mutex); +		list_del_init(&bo->lru_ttm); +		drm_bo_add_to_lru(bo, bm); +		mutex_unlock(&dev->struct_mutex); +	} +#if 0 +	else { +		ret = drm_move_vram_to_local(bo); +		mutex_lock(&dev->struct_mutex); +		list_del_init(&bo->lru_card); +		mutex_unlock(&dev->struct_mutex); +	} +#endif +	if (ret) +		goto out; + +	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED, +			_DRM_BO_FLAG_EVICTED); +      out: +	return ret; +} + +/* + * buf->mutex locked. 
+ */ + +int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type, +		       int no_wait) +{ +	drm_device_t *dev = buf->dev; +	drm_mm_node_t *node; +	drm_buffer_manager_t *bm = &dev->bm; +	drm_buffer_object_t *bo; +	drm_mm_t *mm = &bm->manager[mem_type]; +	struct list_head *lru; +	unsigned long size = buf->num_pages; +	int ret; + +	mutex_lock(&dev->struct_mutex); +	do { +		node = drm_mm_search_free(mm, size, 0, 1); +		if (node) +			break; + +		lru = &bm->lru[mem_type]; +		if (lru->next == lru) +			break; + +		bo = drm_bo_entry(lru->next, mem_type); + +		atomic_inc(&bo->usage); +		mutex_unlock(&dev->struct_mutex); +		mutex_lock(&bo->mutex); +		BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE); +		ret = drm_bo_evict(bo, mem_type, no_wait, 0); +		mutex_unlock(&bo->mutex); +		drm_bo_usage_deref_unlocked(dev, bo); +		if (ret) +			return ret; +		mutex_lock(&dev->struct_mutex); +	} while (1); + +	if (!node) { +		DRM_ERROR("Out of videoram / aperture space\n"); +		mutex_unlock(&dev->struct_mutex); +		return -ENOMEM; +	} + +	node = drm_mm_get_block(node, size, 0); +	mutex_unlock(&dev->struct_mutex); +	BUG_ON(!node); +	node->private = (void *)buf; + +	if (mem_type == DRM_BO_MEM_TT) { +		buf->node_ttm = node; +	} else { +		buf->node_card = node; +	} +	buf->offset = node->start * PAGE_SIZE; +	return 0; +} + +static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait) +{ +	drm_device_t *dev = bo->dev; +	drm_ttm_backend_t *be; +	int ret; + +	if (!(bo->node_ttm && (bo->flags & DRM_BO_FLAG_NO_MOVE))) { +		BUG_ON(bo->node_ttm); +		ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait); +		if (ret) +			return ret; +	} + +	DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->node_ttm->start); + +	mutex_lock(&dev->struct_mutex); +	ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED, +			   bo->node_ttm->start); +	if (ret) { +		drm_mm_put_block(bo->node_ttm); +		bo->node_ttm = NULL; +	} +	mutex_unlock(&dev->struct_mutex); + +	if (ret) { +		return ret; +	} + +	be = bo->ttm->be; +	if (be->needs_ub_cache_adjust(be)) +		bo->flags &= ~DRM_BO_FLAG_CACHED; +	bo->flags &= ~DRM_BO_MASK_MEM; +	bo->flags |= DRM_BO_FLAG_MEM_TT; + +	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) { +		ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags); +		if (ret) +			DRM_ERROR("Could not flush read caches\n"); +	} +	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED); + +	return 0; +} + +static int drm_bo_new_flags(drm_device_t * dev, +			    uint32_t flags, uint32_t new_mask, uint32_t hint, +			    int init, uint32_t * n_flags, uint32_t * n_mask) +{ +	uint32_t new_flags = 0; +	uint32_t new_props; +	drm_bo_driver_t *driver = dev->driver->bo_driver; +	drm_buffer_manager_t *bm = &dev->bm; +	unsigned i; + +	/* +	 * First adjust the mask to take away nonexistant memory types.  
+	 */ + +	for (i = 0; i < DRM_BO_MEM_TYPES; ++i) { +		if (!bm->use_type[i]) +			new_mask &= ~drm_bo_type_flags(i); +	} + +	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { +		DRM_ERROR +		    ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " +		     "processes\n"); +		return -EPERM; +	} +	if (new_mask & DRM_BO_FLAG_BIND_CACHED) { +		if (((new_mask & DRM_BO_FLAG_MEM_TT) && +		     !driver->cached[DRM_BO_MEM_TT]) && +		    ((new_mask & DRM_BO_FLAG_MEM_VRAM) +		     && !driver->cached[DRM_BO_MEM_VRAM])) { +			new_mask &= ~DRM_BO_FLAG_BIND_CACHED; +		} else { +			if (!driver->cached[DRM_BO_MEM_TT]) +				new_flags &= DRM_BO_FLAG_MEM_TT; +			if (!driver->cached[DRM_BO_MEM_VRAM]) +				new_flags &= DRM_BO_FLAG_MEM_VRAM; +		} +	} + +	if ((new_mask & DRM_BO_FLAG_READ_CACHED) && +	    !(new_mask & DRM_BO_FLAG_BIND_CACHED)) { +		if ((new_mask & DRM_BO_FLAG_NO_EVICT) && +		    !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) { +			DRM_ERROR +			    ("Cannot read cached from a pinned VRAM / TT buffer\n"); +			return -EINVAL; +		} +	} + +	/* +	 * Determine new memory location: +	 */ + +	if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) { + +		new_flags = new_mask & DRM_BO_MASK_MEM; + +		if (!new_flags) { +			DRM_ERROR("Invalid buffer object memory flags\n"); +			return -EINVAL; +		} + +		if (new_flags & DRM_BO_FLAG_MEM_LOCAL) { +			if ((hint & DRM_BO_HINT_AVOID_LOCAL) && +			    new_flags & (DRM_BO_FLAG_MEM_VRAM | +					 DRM_BO_FLAG_MEM_TT)) { +				new_flags &= ~DRM_BO_FLAG_MEM_LOCAL; +			} else { +				new_flags = DRM_BO_FLAG_MEM_LOCAL; +			} +		} +		if (new_flags & DRM_BO_FLAG_MEM_TT) { +			if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) && +			    new_flags & DRM_BO_FLAG_MEM_VRAM) { +				new_flags = DRM_BO_FLAG_MEM_VRAM; +			} else { +				new_flags = DRM_BO_FLAG_MEM_TT; +			} +		} +	} else { +		new_flags = flags & DRM_BO_MASK_MEM; +	} + +	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | +				DRM_BO_FLAG_READ); + +	if (!new_props) { +		DRM_ERROR("Invalid buffer object rwx properties\n"); +		return -EINVAL; +	} + +	new_flags |= new_mask & ~DRM_BO_MASK_MEM; + +	if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) && +	    (new_flags & DRM_BO_FLAG_NO_EVICT) && +	    (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) { +		if (!(flags & DRM_BO_FLAG_CACHED)) { +			DRM_ERROR +			    ("Cannot change caching policy of pinned buffer\n"); +			return -EINVAL; +		} else { +			new_flags &= ~DRM_BO_FLAG_CACHED; +		} +	} + +	*n_flags = new_flags; +	*n_mask = new_mask; +	return 0; +} + +/* + * Call dev->struct_mutex locked. + */ + +drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv, +					      uint32_t handle, int check_owner) +{ +	drm_user_object_t *uo; +	drm_buffer_object_t *bo; + +	uo = drm_lookup_user_object(priv, handle); + +	if (!uo || (uo->type != drm_buffer_type)) { +		DRM_ERROR("Could not find buffer object 0x%08x\n", handle); +		return NULL; +	} + +	if (check_owner && priv != uo->owner) { +		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE)) +			return NULL; +	} + +	bo = drm_user_object_entry(uo, drm_buffer_object_t, base); +	atomic_inc(&bo->usage); +	return bo; +} + +/* + * Call bo->mutex locked. + * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. + * Doesn't do any fence flushing as opposed to the drm_bo_busy function. 
+ */ + +static int drm_bo_quick_busy(drm_buffer_object_t * bo) +{ +	drm_fence_object_t *fence = bo->fence; + +	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); +	if (fence) { +		drm_device_t *dev = bo->dev; +		if (drm_fence_object_signaled(fence, bo->fence_type)) { +			drm_fence_usage_deref_unlocked(dev, fence); +			bo->fence = NULL; +			return 0; +		} +		return 1; +	} +	return 0; +} + +/* + * Call bo->mutex locked. + * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. + */ + +static int drm_bo_busy(drm_buffer_object_t * bo) +{ +	drm_fence_object_t *fence = bo->fence; + +	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); +	if (fence) { +		drm_device_t *dev = bo->dev; +		if (drm_fence_object_signaled(fence, bo->fence_type)) { +			drm_fence_usage_deref_unlocked(dev, fence); +			bo->fence = NULL; +			return 0; +		} +		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE); +		if (drm_fence_object_signaled(fence, bo->fence_type)) { +			drm_fence_usage_deref_unlocked(dev, fence); +			bo->fence = NULL; +			return 0; +		} +		return 1; +	} +	return 0; +} + +static int drm_bo_read_cached(drm_buffer_object_t * bo) +{ +	int ret = 0; + +	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); +	if (bo->node_card) +		ret = drm_bo_evict(bo, DRM_BO_MEM_VRAM, 1, 0); +	if (ret) +		return ret; +	if (bo->node_ttm) +		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0); +	return ret; +} + +/* + * Wait until a buffer is unmapped. + */ + +static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) +{ +	int ret = 0; + +	if ((atomic_read(&bo->mapped) >= 0) && no_wait) +		return -EBUSY; + +	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ, +		    atomic_read(&bo->mapped) == -1); + +	if (ret == -EINTR) +		ret = -EAGAIN; + +	return ret; +} + +static int drm_bo_check_unfenced(drm_buffer_object_t * bo) +{ +	int ret; + +	mutex_lock(&bo->mutex); +	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); +	mutex_unlock(&bo->mutex); +	return ret; +} + +/* + * Wait until a buffer, scheduled to be fenced moves off the unfenced list. + * Until then, we cannot really do anything with it except delete it. + * The unfenced list is a PITA, and the operations + * 1) validating + * 2) submitting commands + * 3) fencing + * Should really be an atomic operation.  + * We now "solve" this problem by keeping + * the buffer "unfenced" after validating, but before fencing. + */ + +static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, +				int eagain_if_wait) +{ +	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); +	unsigned long _end = jiffies + 3 * DRM_HZ; + +	if (ret && no_wait) +		return -EBUSY; +	else if (!ret) +		return 0; + +	do { +		mutex_unlock(&bo->mutex); +		DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ, +			    !drm_bo_check_unfenced(bo)); +		mutex_lock(&bo->mutex); +		if (ret == -EINTR) +			return -EAGAIN; +		if (ret) { +			DRM_ERROR +			    ("Error waiting for buffer to become fenced\n"); +			return ret; +		} +		ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); +	} while (ret && !time_after_eq(jiffies, _end)); +	if (ret) { +		DRM_ERROR("Timeout waiting for buffer to become fenced\n"); +		return ret; +	} +	if (eagain_if_wait) +		return -EAGAIN; + +	return 0; +} + +/* + * Fill in the ioctl reply argument with buffer info. + * Bo locked.  
+ */ + +static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, +				drm_bo_arg_reply_t * rep) +{ +	rep->handle = bo->base.hash.key; +	rep->flags = bo->flags; +	rep->size = bo->num_pages * PAGE_SIZE; +	rep->offset = bo->offset; + +	if (bo->ttm_object) { +		rep->arg_handle = bo->ttm_object->map_list.user_token; +	} else { +		rep->arg_handle = 0; +	} + +	rep->mask = bo->mask; +	rep->buffer_start = bo->buffer_start; +	rep->fence_flags = bo->fence_type; +	rep->rep_flags = 0; + +	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) { +		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY, +				DRM_BO_REP_BUSY); +	} +} + +/* + * Wait for buffer idle and register that we've mapped the buffer. + * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,  + * so that if the client dies, the mapping is automatically  + * unregistered. + */ + +static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, +				 uint32_t map_flags, unsigned hint, +				 drm_bo_arg_reply_t * rep) +{ +	drm_buffer_object_t *bo; +	drm_device_t *dev = priv->head->dev; +	int ret = 0; +	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + +	mutex_lock(&dev->struct_mutex); +	bo = drm_lookup_buffer_object(priv, handle, 1); +	mutex_unlock(&dev->struct_mutex); + +	if (!bo) +		return -EINVAL; + +	mutex_lock(&bo->mutex); +	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) { +		ret = drm_bo_wait_unfenced(bo, no_wait, 0); +		if (ret) +			goto out; +	} + +	/* +	 * If this returns true, we are currently unmapped. +	 * We need to do this test, because unmapping can +	 * be done without the bo->mutex held. +	 */ + +	while (1) { +		if (atomic_inc_and_test(&bo->mapped)) { +			if (no_wait && drm_bo_busy(bo)) { +				atomic_dec(&bo->mapped); +				ret = -EBUSY; +				goto out; +			} +			ret = drm_bo_wait(bo, 0, 0, no_wait); +			if (ret) { +				atomic_dec(&bo->mapped); +				goto out; +			} + +			if ((map_flags & DRM_BO_FLAG_READ) && +			    (bo->flags & DRM_BO_FLAG_READ_CACHED) && +			    (!(bo->flags & DRM_BO_FLAG_CACHED))) { +				drm_bo_read_cached(bo); +			} +			break; +		} else if ((map_flags & DRM_BO_FLAG_READ) && +			   (bo->flags & DRM_BO_FLAG_READ_CACHED) && +			   (!(bo->flags & DRM_BO_FLAG_CACHED))) { + +			/* +			 * We are already mapped with different flags. +			 * need to wait for unmap. +			 */ + +			ret = drm_bo_wait_unmapped(bo, no_wait); +			if (ret) +				goto out; + +			continue; +		} +		break; +	} + +	mutex_lock(&dev->struct_mutex); +	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1); +	mutex_unlock(&dev->struct_mutex); +	if (ret) { +		if (atomic_add_negative(-1, &bo->mapped)) +			DRM_WAKEUP(&bo->event_queue); + +	} else +		drm_bo_fill_rep_arg(bo, rep); +      out: +	mutex_unlock(&bo->mutex); +	drm_bo_usage_deref_unlocked(dev, bo); +	return ret; +} + +static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) +{ +	drm_device_t *dev = priv->head->dev; +	drm_buffer_object_t *bo; +	drm_ref_object_t *ro; +	int ret = 0; + +	mutex_lock(&dev->struct_mutex); + +	bo = drm_lookup_buffer_object(priv, handle, 1); +	if (!bo) { +		ret = -EINVAL; +		goto out; +	} + +	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1); +	if (!ro) { +		ret = -EINVAL; +		goto out; +	} + +	drm_remove_ref_object(priv, ro); +	drm_bo_usage_deref_locked(dev, bo); +      out: +	mutex_unlock(&dev->struct_mutex); +	return ret; +} + +/* + * Call struct-sem locked. 
+ */ + +static void drm_buffer_user_object_unmap(drm_file_t * priv, +					 drm_user_object_t * uo, +					 drm_ref_t action) +{ +	drm_buffer_object_t *bo = +	    drm_user_object_entry(uo, drm_buffer_object_t, base); + +	/* +	 * We DON'T want to take the bo->lock here, because we want to +	 * hold it when we wait for unmapped buffer. +	 */ + +	BUG_ON(action != _DRM_REF_TYPE1); + +	if (atomic_add_negative(-1, &bo->mapped)) +		DRM_WAKEUP(&bo->event_queue); +} + +/* + * bo->mutex locked.  + */ + +static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags, +			      int no_wait, int force_no_move) +{ +	int ret = 0; + +	/* +	 * Flush outstanding fences. +	 */ +	drm_bo_busy(bo); + +	/* +	 * Make sure we're not mapped. +	 */ + +	ret = drm_bo_wait_unmapped(bo, no_wait); +	if (ret) +		return ret; + +	/* +	 * Wait for outstanding fences. +	 */ + +	ret = drm_bo_wait(bo, 0, 0, no_wait); + +	if (ret == -EINTR) +		return -EAGAIN; +	if (ret) +		return ret; + +	if (new_flags & DRM_BO_FLAG_MEM_TT) { +		ret = drm_move_local_to_tt(bo, no_wait); +		if (ret) +			return ret; +	} else { +		drm_move_tt_to_local(bo, 0, force_no_move); +	} + +	return 0; +} + +/* + * bo locked. + */ + +static int drm_buffer_object_validate(drm_buffer_object_t * bo, +				      uint32_t new_flags, +				      int move_unfenced, int no_wait) +{ +	drm_device_t *dev = bo->dev; +	drm_buffer_manager_t *bm = &dev->bm; +	uint32_t flag_diff = (new_flags ^ bo->flags); +	drm_bo_driver_t *driver = dev->driver->bo_driver; + +	int ret; + +	if (new_flags & DRM_BO_FLAG_MEM_VRAM) { +		DRM_ERROR("Vram support not implemented yet\n"); +		return -EINVAL; +	} + +	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags); +	ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type); +	if (ret) { +		DRM_ERROR("Driver did not support given buffer permissions\n"); +		return ret; +	} + +	/* +	 * Move out if we need to change caching policy. +	 */ + +	if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) && +	    !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) { +		if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { +			DRM_ERROR("Cannot change caching policy of " +				  "pinned buffer.\n"); +			return -EINVAL; +		} +		ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0); +		if (ret) { +			if (ret != -EAGAIN) +				DRM_ERROR("Failed moving buffer.\n"); +			return ret; +		} +	} +	DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags); +	flag_diff = (new_flags ^ bo->flags); + +	/* +	 * Check whether we dropped no_move policy, and in that case, +	 * release reserved manager regions. +	 */ + +	if ((flag_diff & DRM_BO_FLAG_NO_MOVE) && +	    !(new_flags & DRM_BO_FLAG_NO_MOVE)) { +		mutex_lock(&dev->struct_mutex); +		if (bo->node_ttm) { +			drm_mm_put_block(bo->node_ttm); +			bo->node_ttm = NULL; +		} +		if (bo->node_card) { +			drm_mm_put_block(bo->node_card); +			bo->node_card = NULL; +		} +		mutex_unlock(&dev->struct_mutex); +	} + +	/* +	 * Check whether we need to move buffer. +	 */ + +	if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) { +		ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1); +		if (ret) { +			if (ret != -EAGAIN) +				DRM_ERROR("Failed moving buffer.\n"); +			return ret; +		} +	} + +	if (move_unfenced) { + +		/* +		 * Place on unfenced list. 
+		 */ + +		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, +				_DRM_BO_FLAG_UNFENCED); +		mutex_lock(&dev->struct_mutex); +		list_del(&bo->lru_ttm); +		list_add_tail(&bo->lru_ttm, &bm->unfenced); +		list_del_init(&bo->lru_card); +		mutex_unlock(&dev->struct_mutex); +	} else { + +		mutex_lock(&dev->struct_mutex); +		list_del_init(&bo->lru_ttm); +		list_del_init(&bo->lru_card); +		drm_bo_add_to_lru(bo, bm); +		mutex_unlock(&dev->struct_mutex); +	} + +	bo->flags = new_flags; +	return 0; +} + +static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, +				  uint32_t flags, uint32_t mask, uint32_t hint, +				  drm_bo_arg_reply_t * rep) +{ +	drm_buffer_object_t *bo; +	drm_device_t *dev = priv->head->dev; +	int ret; +	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; +	uint32_t new_flags; + +	bo = drm_lookup_buffer_object(priv, handle, 1); +	if (!bo) { +		return -EINVAL; +	} + +	mutex_lock(&bo->mutex); +	ret = drm_bo_wait_unfenced(bo, no_wait, 0); + +	if (ret) +		goto out; + +	ret = drm_bo_new_flags(dev, bo->flags, +			       (flags & mask) | (bo->mask & ~mask), hint, +			       0, &new_flags, &bo->mask); + +	if (ret) +		goto out; + +	ret = +	    drm_buffer_object_validate(bo, new_flags, +				       !(hint & DRM_BO_HINT_DONT_FENCE), +				       no_wait); +	drm_bo_fill_rep_arg(bo, rep); + +      out: + +	mutex_unlock(&bo->mutex); +	drm_bo_usage_deref_unlocked(dev, bo); +	return ret; +} + +static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, +			      drm_bo_arg_reply_t * rep) +{ +	drm_buffer_object_t *bo; + +	bo = drm_lookup_buffer_object(priv, handle, 1); +	if (!bo) { +		return -EINVAL; +	} +	mutex_lock(&bo->mutex); +	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) +		(void)drm_bo_busy(bo); +	drm_bo_fill_rep_arg(bo, rep); +	mutex_unlock(&bo->mutex); +	drm_bo_usage_deref_unlocked(bo->dev, bo); +	return 0; +} + +static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, +			      uint32_t hint, drm_bo_arg_reply_t * rep) +{ +	drm_buffer_object_t *bo; +	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; +	int ret; + +	bo = drm_lookup_buffer_object(priv, handle, 1); +	if (!bo) { +		return -EINVAL; +	} + +	mutex_lock(&bo->mutex); +	ret = drm_bo_wait_unfenced(bo, no_wait, 0); +	if (ret) +		goto out; +	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait); +	if (ret) +		goto out; + +	drm_bo_fill_rep_arg(bo, rep); + +      out: +	mutex_unlock(&bo->mutex); +	drm_bo_usage_deref_unlocked(bo->dev, bo); +	return ret; +} + +/* + * Call bo->mutex locked. 
+ */ + +static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo) +{ +	drm_device_t *dev = bo->dev; +	drm_ttm_object_t *to = NULL; +	int ret = 0; +	uint32_t ttm_flags = 0; + +	bo->ttm_object = NULL; +	bo->ttm = NULL; + +	switch (bo->type) { +	case drm_bo_type_dc: +		mutex_lock(&dev->struct_mutex); +		ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE, +					    ttm_flags, &to); +		mutex_unlock(&dev->struct_mutex); +		break; +	case drm_bo_type_user: +	case drm_bo_type_fake: +		break; +	default: +		DRM_ERROR("Illegal buffer object type\n"); +		ret = -EINVAL; +		break; +	} + +	if (ret) { +		return ret; +	} + +	if (to) { +		bo->ttm_object = to; +		bo->ttm = drm_ttm_from_object(to); +	} +	return ret; +} + +int drm_buffer_object_create(drm_file_t * priv, +			     unsigned long size, +			     drm_bo_type_t type, +			     uint32_t mask, +			     uint32_t hint, +			     unsigned long buffer_start, +			     drm_buffer_object_t ** buf_obj) +{ +	drm_device_t *dev = priv->head->dev; +	drm_buffer_manager_t *bm = &dev->bm; +	drm_buffer_object_t *bo; +	int ret = 0; +	uint32_t new_flags; +	unsigned long num_pages; + +	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) { +		DRM_ERROR("Invalid buffer object start.\n"); +		return -EINVAL; +	} +	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; +	if (num_pages == 0) { +		DRM_ERROR("Illegal buffer object size.\n"); +		return -EINVAL; +	} + +	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ); + +	if (!bo) +		return -ENOMEM; + +	mutex_init(&bo->mutex); +	mutex_lock(&bo->mutex); + +	atomic_set(&bo->usage, 1); +	atomic_set(&bo->mapped, -1); +	DRM_INIT_WAITQUEUE(&bo->event_queue); +	INIT_LIST_HEAD(&bo->lru_ttm); +	INIT_LIST_HEAD(&bo->lru_card); +	INIT_LIST_HEAD(&bo->ddestroy); +	bo->dev = dev; +	bo->type = type; +	bo->num_pages = num_pages; +	bo->node_card = NULL; +	bo->node_ttm = NULL; +	if (bo->type == drm_bo_type_fake) { +		bo->offset = buffer_start; +		bo->buffer_start = 0; +	} else { +		bo->buffer_start = buffer_start; +	} +	bo->priv_flags = 0; +	bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED; +	atomic_inc(&bm->count); +	ret = drm_bo_new_flags(dev, bo->flags, mask, hint, +			       1, &new_flags, &bo->mask); +	if (ret) +		goto out_err; +	ret = drm_bo_add_ttm(priv, bo); +	if (ret) +		goto out_err; + +	ret = drm_buffer_object_validate(bo, new_flags, 0, +					 hint & DRM_BO_HINT_DONT_BLOCK); +	if (ret) +		goto out_err; + +	mutex_unlock(&bo->mutex); +	*buf_obj = bo; +	return 0; + +      out_err: +	mutex_unlock(&bo->mutex); +	drm_bo_usage_deref_unlocked(dev, bo); +	return ret; +} + +static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, +				  int shareable) +{ +	drm_device_t *dev = priv->head->dev; +	int ret; + +	mutex_lock(&dev->struct_mutex); +	ret = drm_add_user_object(priv, &bo->base, shareable); +	if (ret) +		goto out; + +	bo->base.remove = drm_bo_base_deref_locked; +	bo->base.type = drm_buffer_type; +	bo->base.ref_struct_locked = NULL; +	bo->base.unref = drm_buffer_user_object_unmap; + +      out: +	mutex_unlock(&dev->struct_mutex); +	return ret; +} + +static int drm_bo_lock_test(drm_device_t * dev, struct file *filp) +{ +	LOCK_TEST_WITH_RETURN(dev, filp); +	return 0; +} + +int drm_bo_ioctl(DRM_IOCTL_ARGS) +{ +	DRM_DEVICE; +	drm_bo_arg_t arg; +	drm_bo_arg_request_t *req = &arg.d.req; +	drm_bo_arg_reply_t rep; +	unsigned long next; +	drm_user_object_t *uo; +	drm_buffer_object_t *entry; + +	if (!dev->bm.initialized) { +		DRM_ERROR("Buffer object manager is not initialized.\n"); +		return 
-EINVAL; +	} + +	do { +		DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + +		if (arg.handled) { +			data = arg.next; +			continue; +		} + +		rep.ret = 0; +		switch (req->op) { +		case drm_bo_create: +			rep.ret = +			    drm_buffer_object_create(priv, req->size, +						     req->type, +						     req->mask, +						     req->hint, +						     req->buffer_start, &entry); +			if (rep.ret) +				break; + +			rep.ret = +			    drm_bo_add_user_object(priv, entry, +						   req-> +						   mask & +						   DRM_BO_FLAG_SHAREABLE); +			if (rep.ret) +				drm_bo_usage_deref_unlocked(dev, entry); + +			if (rep.ret) +				break; + +			mutex_lock(&entry->mutex); +			drm_bo_fill_rep_arg(entry, &rep); +			mutex_unlock(&entry->mutex); +			break; +		case drm_bo_unmap: +			rep.ret = drm_buffer_object_unmap(priv, req->handle); +			break; +		case drm_bo_map: +			rep.ret = drm_buffer_object_map(priv, req->handle, +							req->mask, +							req->hint, &rep); +			break; +		case drm_bo_destroy: +			mutex_lock(&dev->struct_mutex); +			uo = drm_lookup_user_object(priv, req->handle); +			if (!uo || (uo->type != drm_buffer_type) +			    || uo->owner != priv) { +				mutex_unlock(&dev->struct_mutex); +				rep.ret = -EINVAL; +				break; +			} +			rep.ret = drm_remove_user_object(priv, uo); +			mutex_unlock(&dev->struct_mutex); +			break; +		case drm_bo_reference: +			rep.ret = drm_user_object_ref(priv, req->handle, +						      drm_buffer_type, &uo); +			if (rep.ret) +				break; +			mutex_lock(&dev->struct_mutex); +			uo = drm_lookup_user_object(priv, req->handle); +			entry = +			    drm_user_object_entry(uo, drm_buffer_object_t, +						  base); +			atomic_dec(&entry->usage); +			mutex_unlock(&dev->struct_mutex); +			mutex_lock(&entry->mutex); +			drm_bo_fill_rep_arg(entry, &rep); +			mutex_unlock(&entry->mutex); +			break; +		case drm_bo_unreference: +			rep.ret = drm_user_object_unref(priv, req->handle, +							drm_buffer_type); +			break; +		case drm_bo_validate: +			rep.ret = drm_bo_lock_test(dev, filp); + +			if (rep.ret) +				break; +			rep.ret = +			    drm_bo_handle_validate(priv, req->handle, req->mask, +						   req->arg_handle, req->hint, +						   &rep); +			break; +		case drm_bo_fence: +			rep.ret = drm_bo_lock_test(dev, filp); +			if (rep.ret) +				break; +			 /**/ break; +		case drm_bo_info: +			rep.ret = drm_bo_handle_info(priv, req->handle, &rep); +			break; +		case drm_bo_wait_idle: +			rep.ret = drm_bo_handle_wait(priv, req->handle, +						     req->hint, &rep); +			break; +		case drm_bo_ref_fence: +			rep.ret = -EINVAL; +			DRM_ERROR("Function is not implemented yet.\n"); +		default: +			rep.ret = -EINVAL; +		} +		next = arg.next; + +		/* +		 * A signal interrupted us. Make sure the ioctl is restartable. +		 */ + +		if (rep.ret == -EAGAIN) +			return -EAGAIN; + +		arg.handled = 1; +		arg.d.rep = rep; +		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); +		data = next; +	} while (data); +	return 0; +} + +/* + * dev->struct_sem locked. 
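+ * (i.e. dev->struct_mutex). The mutex may be temporarily dropped and
+ * re-acquired while individual buffers are waited on or evicted.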
+ */ + +static int drm_bo_force_list_clean(drm_device_t * dev, +				   struct list_head *head, +				   unsigned mem_type, +				   int force_no_move, int allow_errors) +{ +	drm_buffer_manager_t *bm = &dev->bm; +	struct list_head *list, *next, *prev; +	drm_buffer_object_t *entry; +	int ret; +	int clean; + +      retry: +	clean = 1; +	list_for_each_safe(list, next, head) { +		prev = list->prev; +		entry = drm_bo_entry(list, mem_type); +		atomic_inc(&entry->usage); +		mutex_unlock(&dev->struct_mutex); +		mutex_lock(&entry->mutex); +		mutex_lock(&dev->struct_mutex); + +		if (prev != list->prev || next != list->next) { +			mutex_unlock(&entry->mutex); +			drm_bo_usage_deref_locked(dev, entry); +			goto retry; +		} +		if (drm_bo_mm_node(entry, mem_type)) { +			clean = 0; + +			/* +			 * Expire the fence. +			 */ + +			mutex_unlock(&dev->struct_mutex); +			if (entry->fence && bm->nice_mode) { +				unsigned long _end = jiffies + 3 * DRM_HZ; +				do { +					ret = drm_bo_wait(entry, 0, 1, 0); +					if (ret && allow_errors) { +						if (ret == -EINTR) +							ret = -EAGAIN; +						goto out_err; +					} +				} while (ret && !time_after_eq(jiffies, _end)); + +				if (entry->fence) { +					bm->nice_mode = 0; +					DRM_ERROR("Detected GPU hang or " +						  "fence manager was taken down. " +						  "Evicting waiting buffers\n"); +				} +			} +			if (entry->fence) { +				drm_fence_usage_deref_unlocked(dev, +							       entry->fence); +				entry->fence = NULL; +			} + +			DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED, +				     0); + +			if (force_no_move) { +				DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE, +					     0); +			} +			if (entry->flags & DRM_BO_FLAG_NO_EVICT) { +				DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " +					  "cleanup. Removing flag and evicting.\n"); +				entry->flags &= ~DRM_BO_FLAG_NO_EVICT; +				entry->mask &= ~DRM_BO_FLAG_NO_EVICT; +			} + +			ret = drm_bo_evict(entry, mem_type, 1, force_no_move); +			if (ret) { +				if (allow_errors) { +					goto out_err; +				} else { +					DRM_ERROR("Aargh. Eviction failed.\n"); +				} +			} +			mutex_lock(&dev->struct_mutex); +		} +		mutex_unlock(&entry->mutex); +		drm_bo_usage_deref_locked(dev, entry); +		if (prev != list->prev || next != list->next) { +			goto retry; +		} +	} +	if (!clean) +		goto retry; +	return 0; +      out_err: +	mutex_unlock(&entry->mutex); +	drm_bo_usage_deref_unlocked(dev, entry); +	mutex_lock(&dev->struct_mutex); +	return ret; +} + +int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) +{ +	drm_buffer_manager_t *bm = &dev->bm; +	int ret = -EINVAL; + +	if (mem_type >= DRM_BO_MEM_TYPES) { +		DRM_ERROR("Illegal memory type %d\n", mem_type); +		return ret; +	} + +	if (!bm->has_type[mem_type]) { +		DRM_ERROR("Trying to take down uninitialized " +			  "memory manager type\n"); +		return ret; +	} +	bm->use_type[mem_type] = 0; +	bm->has_type[mem_type] = 0; + +	ret = 0; +	if (mem_type > 0) { + +		/* +		 * Throw out unfenced buffers. +		 */ + +		drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0); + +		/* +		 * Throw out evicted no-move buffers. 
+		 */ + +		drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL], +					mem_type, 1, 0); +		drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1, +					0); +		drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1, +					0); + +		if (drm_mm_clean(&bm->manager[mem_type])) { +			drm_mm_takedown(&bm->manager[mem_type]); +		} else { +			ret = -EBUSY; +		} +	} + +	return ret; +} + +static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) +{ +	int ret; +	drm_buffer_manager_t *bm = &dev->bm; + +	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { +		DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type); +		return -EINVAL; +	} + +	ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1); +	if (ret) +		return ret; +	ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1); +	if (ret) +		return ret; +	ret = +	    drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1); +	return ret; +} + +static int drm_bo_init_mm(drm_device_t * dev, +			  unsigned type, +			  unsigned long p_offset, unsigned long p_size) +{ +	drm_buffer_manager_t *bm = &dev->bm; +	int ret = -EINVAL; + +	if (type >= DRM_BO_MEM_TYPES) { +		DRM_ERROR("Illegal memory type %d\n", type); +		return ret; +	} +	if (bm->has_type[type]) { +		DRM_ERROR("Memory manager already initialized for type %d\n", +			  type); +		return ret; +	} + +	ret = 0; +	if (type != DRM_BO_MEM_LOCAL) { +		if (!p_size) { +			DRM_ERROR("Zero size memory manager type %d\n", type); +			return ret; +		} +		ret = drm_mm_init(&bm->manager[type], p_offset, p_size); +		if (ret) +			return ret; +	} +	bm->has_type[type] = 1; +	bm->use_type[type] = 1; + +	INIT_LIST_HEAD(&bm->lru[type]); +	INIT_LIST_HEAD(&bm->pinned[type]); + +	return 0; +} + +/* + * This is called from lastclose, so we don't need to bother about + * any clients still running when we set the initialized flag to zero. + */ + +int drm_bo_driver_finish(drm_device_t * dev) +{ +	drm_buffer_manager_t *bm = &dev->bm; +	int ret = 0; +	unsigned i = DRM_BO_MEM_TYPES; + +	mutex_lock(&dev->bm.init_mutex); +	mutex_lock(&dev->struct_mutex); + +	if (!bm->initialized) +		goto out; +	bm->initialized = 0; + +	while (i--) { +		if (bm->has_type[i]) { +			bm->use_type[i] = 0; +			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) { +				ret = -EBUSY; +				DRM_ERROR("DRM memory manager type %d " +					  "is not clean.\n", i); +			} +			bm->has_type[i] = 0; +		} +	} +	mutex_unlock(&dev->struct_mutex); +	if (!cancel_delayed_work(&bm->wq)) { +		flush_scheduled_work(); +	} +	mutex_lock(&dev->struct_mutex); +	drm_bo_delayed_delete(dev, 1); +	if (list_empty(&bm->ddestroy)) { +		DRM_DEBUG("Delayed destroy list was clean\n"); +	} +	if (list_empty(&bm->lru[0])) { +		DRM_DEBUG("Swap list was clean\n"); +	} +	if (list_empty(&bm->pinned[0])) { +		DRM_DEBUG("NO_MOVE list was clean\n"); +	} +	if (list_empty(&bm->unfenced)) { +		DRM_DEBUG("Unfenced list was clean\n"); +	} +      out: +	mutex_unlock(&dev->struct_mutex); +	mutex_unlock(&dev->bm.init_mutex); +	return ret; +} + +int drm_bo_driver_init(drm_device_t * dev) +{ +	drm_bo_driver_t *driver = dev->driver->bo_driver; +	drm_buffer_manager_t *bm = &dev->bm; +	int ret = -EINVAL; + +	mutex_lock(&dev->bm.init_mutex); +	mutex_lock(&dev->struct_mutex); +	if (!driver) +		goto out_unlock; + +	/* +	 * Initialize the system memory buffer type. +	 * Other types need to be driver / IOCTL initialized. 
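+	 * (See the mm_init case of drm_mm_init_ioctl() below.)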
+	 */ + +	ret = drm_bo_init_mm(dev, 0, 0, 0); +	if (ret) +		goto out_unlock; + +	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev); +	bm->initialized = 1; +	bm->nice_mode = 1; +	atomic_set(&bm->count, 0); +	bm->cur_pages = 0; +	INIT_LIST_HEAD(&bm->unfenced); +	INIT_LIST_HEAD(&bm->ddestroy); +      out_unlock: +	mutex_unlock(&dev->struct_mutex); +	mutex_unlock(&dev->bm.init_mutex); +	return ret; +} + +EXPORT_SYMBOL(drm_bo_driver_init); + +int drm_mm_init_ioctl(DRM_IOCTL_ARGS) +{ +	DRM_DEVICE; + +	int ret = 0; +	drm_mm_init_arg_t arg; +	drm_buffer_manager_t *bm = &dev->bm; +	drm_bo_driver_t *driver = dev->driver->bo_driver; + +	if (!driver) { +		DRM_ERROR("Buffer objects are not supported by this driver\n"); +		return -EINVAL; +	} + +	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + +	switch (arg.req.op) { +	case mm_init: +		ret = -EINVAL; +		mutex_lock(&dev->bm.init_mutex); +		mutex_lock(&dev->struct_mutex); +		if (!bm->initialized) { +			DRM_ERROR("DRM memory manager was not initialized.\n"); +			break; +		} +		if (arg.req.mem_type == 0) { +			DRM_ERROR +			    ("System memory buffers already initialized.\n"); +			break; +		} +		ret = drm_bo_init_mm(dev, arg.req.mem_type, +				     arg.req.p_offset, arg.req.p_size); +		break; +	case mm_takedown: +		LOCK_TEST_WITH_RETURN(dev, filp); +		mutex_lock(&dev->bm.init_mutex); +		mutex_lock(&dev->struct_mutex); +		ret = -EINVAL; +		if (!bm->initialized) { +			DRM_ERROR("DRM memory manager was not initialized\n"); +			break; +		} +		if (arg.req.mem_type == 0) { +			DRM_ERROR("No takedown for System memory buffers.\n"); +			break; +		} +		ret = 0; +		if (drm_bo_clean_mm(dev, arg.req.mem_type)) { +			DRM_ERROR("Memory manager type %d not clean. " +				  "Delaying takedown\n", arg.req.mem_type); +		} +		break; +	case mm_lock: +		LOCK_TEST_WITH_RETURN(dev, filp); +		mutex_lock(&dev->bm.init_mutex); +		mutex_lock(&dev->struct_mutex); +		ret = drm_bo_lock_mm(dev, arg.req.mem_type); +		break; +	case mm_unlock: +		LOCK_TEST_WITH_RETURN(dev, filp); +		mutex_lock(&dev->bm.init_mutex); +		mutex_lock(&dev->struct_mutex); +		ret = 0; +		break; +	default: +		DRM_ERROR("Function not implemented yet\n"); +		return -EINVAL; +	} + +	mutex_unlock(&dev->struct_mutex); +	mutex_unlock(&dev->bm.init_mutex); +	if (ret) +		return ret; + +	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); +	return 0; +} diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index a2a3dbf7..d6ebc8d1 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -80,14 +80,14 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,  	if (!use_hashed_handle) {  		int ret; -		hash->key = user_token; +		hash->key = user_token >> PAGE_SHIFT;  		ret = drm_ht_insert_item(&dev->map_hash, hash);  		if (ret != -EINVAL)   			return ret;  	}  	return drm_ht_just_insert_please(&dev->map_hash, hash,   					 user_token, 32 - PAGE_SHIFT - 3, -					 PAGE_SHIFT, DRM_MAP_HASH_OFFSET); +					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);  }  /** @@ -301,7 +301,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,  		return ret;  	} -	list->user_token = list->hash.key; +	list->user_token = list->hash.key << PAGE_SHIFT;  	mutex_unlock(&dev->struct_mutex);  	*maplist = list; @@ -386,7 +386,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)  		if (r_list->map == map) {  			list_del(list); -			drm_ht_remove_key(&dev->map_hash, r_list->user_token); +			drm_ht_remove_key(&dev->map_hash,  +					  r_list->user_token >> PAGE_SHIFT);  			
drm_free(list, sizeof(*list), DRM_MEM_MAPS);  			break;  		} @@ -422,6 +423,8 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)  		dmah.size = map->size;  		__drm_pci_free(dev, &dmah);  		break; +	case _DRM_TTM: +		BUG_ON(1);  	}  	drm_free(map, sizeof(*map), DRM_MEM_MAPS); diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c new file mode 100644 index 00000000..90e53419 --- /dev/null +++ b/linux-core/drm_compat.c @@ -0,0 +1,421 @@ +/************************************************************************** + *  + * This kernel module is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + *  + **************************************************************************/ +/* + * This code provides access to unexported mm kernel features. It is necessary + * to use the new DRM memory manager code with kernels that don't support it + * directly. + * + * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> + *          Linux kernel mm subsystem authors.  + *          (Most code taken from there). + */ + +#include "drmP.h" + +#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +int drm_map_page_into_agp(struct page *page) +{ +        int i; +        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); +        /* Caller's responsibility to call global_flush_tlb() for +         * performance reasons */ +        return i; +} + +int drm_unmap_page_from_agp(struct page *page) +{ +        int i; +        i = change_page_attr(page, 1, PAGE_KERNEL); +        /* Caller's responsibility to call global_flush_tlb() for +         * performance reasons */ +        return i; +} +#endif + + +pgprot_t vm_get_page_prot(unsigned long vm_flags) +{ +#ifdef MODULE +	static pgprot_t drm_protection_map[16] = { +		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, +		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 +	}; + +	return drm_protection_map[vm_flags & 0x0F]; +#else +	extern pgprot_t protection_map[]; +	return protection_map[vm_flags & 0x0F]; +#endif +}; + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + +static int drm_pte_is_clear(struct vm_area_struct *vma, +			    unsigned long addr) +{ +	struct mm_struct *mm = vma->vm_mm; +	int ret = 1; +	pte_t *pte; +	pmd_t *pmd; +	pud_t *pud; +	pgd_t *pgd; +	 + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +	spin_lock(&mm->page_table_lock); +#else +	spinlock_t *ptl; +#endif +	 +	pgd = pgd_offset(mm, addr); +	if (pgd_none(*pgd)) +		goto unlock; +	pud = pud_offset(pgd, addr); +        if (pud_none(*pud)) +		goto unlock; +	pmd = pmd_offset(pud, addr); +	if (pmd_none(*pmd)) +		goto unlock; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +	pte = pte_offset_map(pmd, addr); +#else  +	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  +#endif +	if (!pte) +		goto unlock; +	ret = pte_none(*pte); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +	pte_unmap(pte); 
+ unlock:	 +	spin_unlock(&mm->page_table_lock); +#else +	pte_unmap_unlock(pte, ptl); + unlock: +#endif +	return ret; +} +	 +int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,  +		  unsigned long pfn, pgprot_t pgprot) +{ +	int ret; +	if (!drm_pte_is_clear(vma, addr)) +		return -EBUSY; + +	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot); +	return ret; +} + + +static struct { +	spinlock_t lock; +	struct page *dummy_page; +	atomic_t present; +} drm_np_retry =  +{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)}; + +struct page * get_nopage_retry(void) +{ +	if (atomic_read(&drm_np_retry.present) == 0) { +		struct page *page = alloc_page(GFP_KERNEL); +		if (!page) +			return NOPAGE_OOM; +		spin_lock(&drm_np_retry.lock); +		drm_np_retry.dummy_page = page; +		atomic_set(&drm_np_retry.present,1); +		spin_unlock(&drm_np_retry.lock); +	} +	get_page(drm_np_retry.dummy_page); +	return drm_np_retry.dummy_page; +} + +void free_nopage_retry(void) +{ +	if (atomic_read(&drm_np_retry.present) == 1) { +		spin_lock(&drm_np_retry.lock); +		__free_page(drm_np_retry.dummy_page); +		drm_np_retry.dummy_page = NULL; +		atomic_set(&drm_np_retry.present, 0); +		spin_unlock(&drm_np_retry.lock); +	} +} +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + +struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, +			       unsigned long address,  +			       int *type) +{ +	struct fault_data data; + +	if (type) +		*type = VM_FAULT_MINOR; + +	data.address = address; +	data.vma = vma; +	drm_vm_ttm_fault(vma, &data); +	switch (data.type) { +	case VM_FAULT_OOM: +		return NOPAGE_OOM; +	case VM_FAULT_SIGBUS: +		return NOPAGE_SIGBUS; +	default: +		break; +	} + +	return NOPAGE_REFAULT; +} + +#endif + +#ifdef DRM_ODD_MM_COMPAT + +typedef struct p_mm_entry { +	struct list_head head; +	struct mm_struct *mm; +	atomic_t refcount; +        int locked; +} p_mm_entry_t; + +typedef struct vma_entry { +	struct list_head head; +	struct vm_area_struct *vma; +} vma_entry_t; + + +struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, +			       unsigned long address,  +			       int *type) +{ +	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; +	unsigned long page_offset; +	struct page *page; +	drm_ttm_t *ttm;  +	drm_buffer_manager_t *bm; +	drm_device_t *dev; + +	/* +	 * FIXME: Check can't map aperture flag. 
+	 */ + +	if (type) +		*type = VM_FAULT_MINOR; + +	if (!map)  +		return NOPAGE_OOM; + +	if (address > vma->vm_end)  +		return NOPAGE_SIGBUS; + +	ttm = (drm_ttm_t *) map->offset;	 +	dev = ttm->dev; +	mutex_lock(&dev->struct_mutex); +	drm_fixup_ttm_caching(ttm); +	BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED); + +	bm = &dev->bm; +	page_offset = (address - vma->vm_start) >> PAGE_SHIFT; +	page = ttm->pages[page_offset]; + +	if (!page) { +		if (drm_alloc_memctl(PAGE_SIZE)) { +			page = NOPAGE_OOM; +			goto out; +		} +		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0); +		if (!page) { +		        drm_free_memctl(PAGE_SIZE); +			page = NOPAGE_OOM; +			goto out; +		} +		++bm->cur_pages; +		SetPageLocked(page); +	} + +	get_page(page); + out: +	mutex_unlock(&dev->struct_mutex); +	return page; +} + + + + +int drm_ttm_map_bound(struct vm_area_struct *vma) +{ +	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; +	drm_ttm_t *ttm = (drm_ttm_t *) map->offset; +	int ret = 0; + +	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) { +		unsigned long pfn = ttm->aper_offset +  +			(ttm->be->aperture_base >> PAGE_SHIFT); +		pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma); +		 +		ret = io_remap_pfn_range(vma, vma->vm_start, pfn, +					 vma->vm_end - vma->vm_start, +					 pgprot); +	} +	return ret; +} +	 + +int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) +{ +	p_mm_entry_t *entry, *n_entry; +	vma_entry_t *v_entry; +	drm_local_map_t *map = (drm_local_map_t *) +		vma->vm_private_data; +	struct mm_struct *mm = vma->vm_mm; + +	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM); +	if (!v_entry) { +		DRM_ERROR("Allocation of vma pointer entry failed\n"); +		return -ENOMEM; +	} +	v_entry->vma = vma; +	map->handle = (void *) v_entry; +	list_add_tail(&v_entry->head, &ttm->vma_list); + +	list_for_each_entry(entry, &ttm->p_mm_list, head) { +		if (mm == entry->mm) { +			atomic_inc(&entry->refcount); +			return 0; +		} else if ((unsigned long)mm < (unsigned long)entry->mm) ; +	} + +	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM); +	if (!n_entry) { +		DRM_ERROR("Allocation of process mm pointer entry failed\n"); +		return -ENOMEM; +	} +	INIT_LIST_HEAD(&n_entry->head); +	n_entry->mm = mm; +	n_entry->locked = 0; +	atomic_set(&n_entry->refcount, 0); +	list_add_tail(&n_entry->head, &entry->head); + +	return 0; +} + +void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma) +{ +	p_mm_entry_t *entry, *n; +	vma_entry_t *v_entry, *v_n; +	int found = 0; +	struct mm_struct *mm = vma->vm_mm; + +	list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) { +		if (v_entry->vma == vma) { +			found = 1; +			list_del(&v_entry->head); +			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM); +			break; +		} +	} +	BUG_ON(!found); + +	list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) { +		if (mm == entry->mm) { +			if (atomic_add_negative(-1, &entry->refcount)) { +				list_del(&entry->head); +				BUG_ON(entry->locked); +				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM); +			} +			return; +		} +	} +	BUG_ON(1); +} + + + +int drm_ttm_lock_mm(drm_ttm_t * ttm) +{ +	p_mm_entry_t *entry; +	int lock_ok = 1; +	 +	list_for_each_entry(entry, &ttm->p_mm_list, head) { +		BUG_ON(entry->locked); +		if (!down_write_trylock(&entry->mm->mmap_sem)) { +			lock_ok = 0; +			break; +		} +		entry->locked = 1; +	} + +	if (lock_ok) +		return 0; + +	list_for_each_entry(entry, &ttm->p_mm_list, head) { +		if (!entry->locked)  +			break; +		up_write(&entry->mm->mmap_sem); +		entry->locked = 0; +	} 
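+
+	/*
+	 * The trylock above failed on one of the mms; any mmap_sems taken
+	 * before the failure have just been released again.
+	 */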
+ +	/* +	 * Possible deadlock. Try again. Our callers should handle this +	 * and restart. +	 */ + +	return -EAGAIN; +} + +void drm_ttm_unlock_mm(drm_ttm_t * ttm) +{ +	p_mm_entry_t *entry; +	 +	list_for_each_entry(entry, &ttm->p_mm_list, head) { +		BUG_ON(!entry->locked); +		up_write(&entry->mm->mmap_sem); +		entry->locked = 0; +	} +} + +int drm_ttm_remap_bound(drm_ttm_t *ttm)  +{ +	vma_entry_t *v_entry; +	int ret = 0; +	 +	list_for_each_entry(v_entry, &ttm->vma_list, head) { +		ret = drm_ttm_map_bound(v_entry->vma); +		if (ret) +			break; +	} + +	drm_ttm_unlock_mm(ttm); +	return ret; +} + +void drm_ttm_finish_unmap(drm_ttm_t *ttm) +{ +	vma_entry_t *v_entry; +	 +	if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) +		return; + +	list_for_each_entry(v_entry, &ttm->vma_list, head) { +		v_entry->vma->vm_flags &= ~VM_PFNMAP;  +	} +	drm_ttm_unlock_mm(ttm); +}	 + +#endif + diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 407853d7..5617fb7f 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -31,6 +31,7 @@   * OTHER DEALINGS IN THE SOFTWARE.   */ +#include <asm/agp.h>  #ifndef _DRM_COMPAT_H_  #define _DRM_COMPAT_H_ @@ -227,4 +228,148 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from  }  #endif +#include <linux/mm.h> +#include <asm/page.h> + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \ +     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))  +#define DRM_ODD_MM_COMPAT +#endif + + + +/* + * Flush relevant caches and clear a VMA structure so that page references  + * will cause a page fault. Don't flush tlbs. + */ + +extern void drm_clear_vma(struct vm_area_struct *vma, +			  unsigned long addr, unsigned long end); + +/* + * Return the PTE protection map entries for the VMA flags given by  + * flags. This is a functional interface to the kernel's protection map. + */ + +extern pgprot_t vm_get_page_prot(unsigned long vm_flags); + +/* + * These are similar to the current kernel gatt pages allocator, only that we + * want a struct page pointer instead of a virtual address. This allows for pages + * that are not in the kernel linear map. + */ + +#define drm_alloc_gatt_pages(order) ({					\ +			void *_virt = alloc_gatt_pages(order);		\ +			((_virt) ? virt_to_page(_virt) : NULL);}) +#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order)  + +#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) + +/* + * These are too slow in earlier kernels. + */ + +extern int drm_unmap_page_from_agp(struct page *page); +extern int drm_map_page_into_agp(struct page *page); + +#define map_page_into_agp drm_map_page_into_agp +#define unmap_page_from_agp drm_unmap_page_from_agp +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) + +/* + * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.  + * For now, just return a dummy page that we've allocated out of  + * static space. The page will be put by do_nopage() since we've already + * filled out the pte. 
+ */ + +struct fault_data { +	struct vm_area_struct *vma; +	unsigned long address; +	pgoff_t pgoff; +	unsigned int flags; +	 +	int type; +}; + +extern struct page *get_nopage_retry(void); +extern void free_nopage_retry(void); + +#define NOPAGE_REFAULT get_nopage_retry() + +extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,  +			 unsigned long pfn, pgprot_t pgprot); + +extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, +				      unsigned long address,  +				      int *type); + +extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,  +				     struct fault_data *data); + +#endif + +#ifdef DRM_ODD_MM_COMPAT + +struct drm_ttm; + + +/* + * Add a vma to the ttm vma list, and the  + * process mm pointer to the ttm mm list. Needs the ttm mutex. + */ + +extern int drm_ttm_add_vma(struct drm_ttm * ttm,  +			   struct vm_area_struct *vma); +/* + * Delete a vma and the corresponding mm pointer from the + * ttm lists. Needs the ttm mutex. + */ +extern void drm_ttm_delete_vma(struct drm_ttm * ttm,  +			       struct vm_area_struct *vma); + +/* + * Attempts to lock all relevant mmap_sems for a ttm, while + * not releasing the ttm mutex. May return -EAGAIN to avoid  + * deadlocks. In that case the caller shall release the ttm mutex, + * schedule() and try again. + */ + +extern int drm_ttm_lock_mm(struct drm_ttm * ttm); + +/* + * Unlock all relevant mmap_sems for a ttm. + */ +extern void drm_ttm_unlock_mm(struct drm_ttm * ttm); + +/* + * If the ttm was bound to the aperture, this function shall be called + * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all + * vmas mapping this ttm. This is needed just after unmapping the ptes of + * the vma, otherwise the do_nopage() function will bug :(. The function + * releases the mmap_sems for this ttm. + */ + +extern void drm_ttm_finish_unmap(struct drm_ttm *ttm); + +/* + * Remap all vmas of this ttm using io_remap_pfn_range. We cannot  + * fault these pfns in, because the first one will set the vma VM_PFNMAP + * flag, which will make the next fault bug in do_nopage(). The function + * releases the mmap_sems for this ttm. + */ + +extern int drm_ttm_remap_bound(struct drm_ttm *ttm); + + +/* + * Remap a vma for a bound ttm. Call with the ttm mutex held and + * the relevant mmap_sem locked. + */ +extern int drm_ttm_map_bound(struct vm_area_struct *vma); + +#endif  #endif diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 228c8b89..75c89c1c 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -119,12 +119,17 @@ static drm_ioctl_desc_t drm_ioctls[] = {  	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},  	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, +	[DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH}, +	[DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, +	[DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl,  +					     DRM_AUTH },  	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},  };  #define DRIVER_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls ) +  /**   * Take down the DRM device.   * @@ -143,6 +148,11 @@ int drm_lastclose(drm_device_t * dev)  	DRM_DEBUG("\n"); +	if (drm_bo_driver_finish(dev)) { +		DRM_ERROR("DRM memory manager still busy. " +			  "System is unstable. 
Please reboot.\n"); +	} +  	if (dev->driver->lastclose)  		dev->driver->lastclose(dev);  	DRM_DEBUG("driver lastclose completed\n"); @@ -218,7 +228,7 @@ int drm_lastclose(drm_device_t * dev)  	if (dev->vmalist) {  		for (vma = dev->vmalist; vma; vma = vma_next) {  			vma_next = vma->next; -			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); +			drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);  		}  		dev->vmalist = NULL;  	} @@ -256,6 +266,7 @@ int drm_lastclose(drm_device_t * dev)  		dev->lock.filp = NULL;  		wake_up_interruptible(&dev->lock.lock_queue);  	} +	dev->dev_mapping = NULL;  	mutex_unlock(&dev->struct_mutex);  	DRM_DEBUG("lastclose completed\n"); @@ -360,11 +371,14 @@ static void drm_cleanup(drm_device_t * dev)  	}  	drm_lastclose(dev); +	drm_fence_manager_takedown(dev);  	if (dev->maplist) {  		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);  		dev->maplist = NULL;  		drm_ht_remove(&dev->map_hash); +		drm_mm_takedown(&dev->offset_manager); +		drm_ht_remove(&dev->object_hash);  	}  	if (!drm_fb_loaded) @@ -419,6 +433,9 @@ void drm_exit(struct drm_driver *driver)  		}  	} else  		pci_unregister_driver(&driver->pci_driver); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) +	free_nopage_retry(); +#endif  	DRM_INFO("Module unloaded\n");  }  EXPORT_SYMBOL(drm_exit); @@ -429,10 +446,60 @@ static struct file_operations drm_stub_fops = {  	.open = drm_stub_open  }; +static int drm_create_memory_caches(void) +{ +	drm_cache.mm = kmem_cache_create("drm_mm_node_t",  +					 sizeof(drm_mm_node_t), +					 0, +					 SLAB_HWCACHE_ALIGN, +					 NULL,NULL); +	if (!drm_cache.mm) +		return -ENOMEM; + +	drm_cache.fence_object= kmem_cache_create("drm_fence_object_t",  +						  sizeof(drm_fence_object_t), +						  0, +						  SLAB_HWCACHE_ALIGN, +						  NULL,NULL); +	if (!drm_cache.fence_object) +		return -ENOMEM; + +	return 0; +} + +static void drm_free_mem_cache(kmem_cache_t *cache,  +			       const char *name) +{ +	if (!cache) +		return; +	if (kmem_cache_destroy(cache)) { +		DRM_ERROR("Warning! DRM is leaking %s memory.\n", +			  name); +	} +} + +static void drm_free_memory_caches(void ) +{ +	 +	drm_free_mem_cache(drm_cache.fence_object, "fence object"); +	drm_cache.fence_object = NULL; +	drm_free_mem_cache(drm_cache.mm, "memory manager block"); +	drm_cache.mm = NULL; +} + +  static int __init drm_core_init(void)  { -	int ret = -ENOMEM; +	int ret; +	struct sysinfo si; +	 +	si_meminfo(&si); +	drm_init_memctl(si.totalram/2, si.totalram*3/4); +	ret = drm_create_memory_caches(); +	if (ret) +		goto err_p1; +	ret = -ENOMEM;  	drm_cards_limit =  	    (drm_cards_limit < DRM_MAX_MINOR + 1 ? 
drm_cards_limit : DRM_MAX_MINOR + 1);  	drm_heads = drm_calloc(drm_cards_limit, sizeof(*drm_heads), DRM_MEM_STUB); @@ -468,11 +535,13 @@ err_p2:  	unregister_chrdev(DRM_MAJOR, "drm");  	drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);  err_p1: +	drm_free_memory_caches();  	return ret;  }  static void __exit drm_core_exit(void)  { +	drm_free_memory_caches();  	remove_proc_entry("dri", NULL);  	drm_sysfs_destroy(drm_class); @@ -549,14 +618,19 @@ int drm_ioctl(struct inode *inode, struct file *filp,  		  current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),  		  priv->authenticated); -	if (nr < DRIVER_IOCTL_COUNT) +	if (nr >= DRIVER_IOCTL_COUNT &&  +	    (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)) +		goto err_i1; +	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) +		&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) +			ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; +	else if (nr >= DRM_COMMAND_END || nr < DRM_COMMAND_BASE)	  		ioctl = &drm_ioctls[nr]; -	else if ((nr >= DRM_COMMAND_BASE) -		 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) -		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; -	else +	else   		goto err_i1; + +  	func = ioctl->func;  	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)	/* Local override? */  		func = dev->driver->dma_ioctl; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c new file mode 100644 index 00000000..f656340e --- /dev/null +++ b/linux-core/drm_fence.c @@ -0,0 +1,619 @@ +/************************************************************************** + *  + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + *  + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + *  + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + *  + *  + **************************************************************************/ +/* + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + +#include "drmP.h" + +/* + * Typically called by the IRQ handler. 
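+ * Walks the fence ring, marks the relevant fence types of all fences
+ * passed by "sequence" as signaled, schedules any outstanding flushes
+ * and wakes up waiters.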
+ */ + +void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type) +{ +	int wake = 0; +	uint32_t diff; +	uint32_t relevant; +	drm_fence_manager_t *fm = &dev->fm; +	drm_fence_driver_t *driver = dev->driver->fence_driver; +	struct list_head *list, *prev; +	drm_fence_object_t *fence; +	int found = 0; + +	if (list_empty(&fm->ring)) +		return; + +	list_for_each_entry(fence, &fm->ring, ring) { +		diff = (sequence - fence->sequence) & driver->sequence_mask; +		if (diff > driver->wrap_diff) { +			found = 1; +			break; +		} +	} + +	list = (found) ? fence->ring.prev : fm->ring.prev; +	prev = list->prev; + +	for (; list != &fm->ring; list = prev, prev = list->prev) { +		fence = list_entry(list, drm_fence_object_t, ring); + +		type |= fence->native_type; +		relevant = type & fence->type; + +		if ((fence->signaled | relevant) != fence->signaled) { +			fence->signaled |= relevant; +			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", +				  fence->base.hash.key, fence->signaled); +			fence->submitted_flush |= relevant; +			wake = 1; +		} + +		relevant = fence->flush_mask & +		    ~(fence->signaled | fence->submitted_flush); + +		if (relevant) { +			fm->pending_flush |= relevant; +			fence->submitted_flush = fence->flush_mask; +		} + +		if (!(fence->type & ~fence->signaled)) { +			DRM_DEBUG("Fence completely signaled 0x%08lx\n", +				  fence->base.hash.key); +			list_del_init(&fence->ring); +		} + +	} + +	if (wake) { +		DRM_WAKEUP(&fm->fence_queue); +	} +} + +EXPORT_SYMBOL(drm_fence_handler); + +static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) +{ +	drm_fence_manager_t *fm = &dev->fm; +	unsigned long flags; + +	write_lock_irqsave(&fm->lock, flags); +	list_del_init(ring); +	write_unlock_irqrestore(&fm->lock, flags); +} + +void drm_fence_usage_deref_locked(drm_device_t * dev, +				  drm_fence_object_t * fence) +{ +	drm_fence_manager_t *fm = &dev->fm; + +	if (atomic_dec_and_test(&fence->usage)) { +		drm_fence_unring(dev, &fence->ring); +		DRM_DEBUG("Destroyed a fence object 0x%08lx\n", +			  fence->base.hash.key); +		atomic_dec(&fm->count); +		drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence), +				   fence); +	} +} + +void drm_fence_usage_deref_unlocked(drm_device_t * dev, +				    drm_fence_object_t * fence) +{ +	drm_fence_manager_t *fm = &dev->fm; + +	if (atomic_dec_and_test(&fence->usage)) { +		mutex_lock(&dev->struct_mutex); +		if (atomic_read(&fence->usage) == 0) { +			drm_fence_unring(dev, &fence->ring); +			atomic_dec(&fm->count); +			drm_ctl_cache_free(drm_cache.fence_object, +					   sizeof(*fence), fence); +		} +		mutex_unlock(&dev->struct_mutex); +	} +} + +static void drm_fence_object_destroy(drm_file_t * priv, +				     drm_user_object_t * base) +{ +	drm_device_t *dev = priv->head->dev; +	drm_fence_object_t *fence = +	    drm_user_object_entry(base, drm_fence_object_t, base); + +	drm_fence_usage_deref_locked(dev, fence); +} + +static int fence_signaled(drm_device_t * dev, volatile +			  drm_fence_object_t * fence, +			  uint32_t mask, int poke_flush) +{ +	unsigned long flags; +	int signaled; +	drm_fence_manager_t *fm = &dev->fm; +	drm_fence_driver_t *driver = dev->driver->fence_driver; + +	if (poke_flush) +		driver->poke_flush(dev); +	read_lock_irqsave(&fm->lock, flags); +	signaled = +	    (fence->type & mask & fence->signaled) == (fence->type & mask); +	read_unlock_irqrestore(&fm->lock, flags); + +	return signaled; +} + +static void drm_fence_flush_exe(drm_fence_manager_t * fm, +				drm_fence_driver_t * driver, uint32_t sequence) +{ +	uint32_t diff; 
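+
+	/*
+	 * Schedule a flush of the EXE fence type up to "sequence". If no
+	 * EXE flush is pending, derive last_exe_flush from the oldest fence
+	 * on the ring; otherwise just extend the pending flush if "sequence"
+	 * is newer.
+	 */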
+ +	if (!fm->pending_exe_flush) { +		volatile struct list_head *list; + +		/* +		 * Last_exe_flush is invalid. Find oldest sequence. +		 */ + +/*		list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/ +		list = &fm->ring; +		if (list->next == &fm->ring) { +			return; +		} else { +			drm_fence_object_t *fence = +			    list_entry(list->next, drm_fence_object_t, ring); +			fm->last_exe_flush = (fence->sequence - 1) & +			    driver->sequence_mask; +		} +		diff = (sequence - fm->last_exe_flush) & driver->sequence_mask; +		if (diff >= driver->wrap_diff) +			return; +		fm->exe_flush_sequence = sequence; +		fm->pending_exe_flush = 1; +	} else { +		diff = +		    (sequence - fm->exe_flush_sequence) & driver->sequence_mask; +		if (diff < driver->wrap_diff) { +			fm->exe_flush_sequence = sequence; +		} +	} +} + +int drm_fence_object_signaled(volatile drm_fence_object_t * fence, +			      uint32_t type) +{ +	return ((fence->signaled & type) == type); +} + +int drm_fence_object_flush(drm_device_t * dev, +			   volatile drm_fence_object_t * fence, uint32_t type) +{ +	drm_fence_manager_t *fm = &dev->fm; +	drm_fence_driver_t *driver = dev->driver->fence_driver; +	unsigned long flags; + +	if (type & ~fence->type) { +		DRM_ERROR("Flush trying to extend fence type, " +			  "0x%x, 0x%x\n", type, fence->type); +		return -EINVAL; +	} + +	write_lock_irqsave(&fm->lock, flags); +	fence->flush_mask |= type; +	if (fence->submitted_flush == fence->signaled) { +		if ((fence->type & DRM_FENCE_TYPE_EXE) && +		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) { +			drm_fence_flush_exe(fm, driver, fence->sequence); +			fence->submitted_flush |= DRM_FENCE_TYPE_EXE; +		} else { +			fm->pending_flush |= (fence->flush_mask & +					      ~fence->submitted_flush); +			fence->submitted_flush = fence->flush_mask; +		} +	} +	write_unlock_irqrestore(&fm->lock, flags); +	driver->poke_flush(dev); +	return 0; +} + +/* + * Make sure old fence objects are signaled before their fence sequences are + * wrapped around and reused. 
+ */ + +void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence) +{ +	drm_fence_manager_t *fm = &dev->fm; +	drm_fence_driver_t *driver = dev->driver->fence_driver; +	uint32_t old_sequence; +	unsigned long flags; +	drm_fence_object_t *fence; +	uint32_t diff; + +	mutex_lock(&dev->struct_mutex); +	read_lock_irqsave(&fm->lock, flags); +	if (fm->ring.next == &fm->ring) { +		read_unlock_irqrestore(&fm->lock, flags); +		mutex_unlock(&dev->struct_mutex); +		return; +	} +	old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask; +	fence = list_entry(fm->ring.next, drm_fence_object_t, ring); +	atomic_inc(&fence->usage); +	mutex_unlock(&dev->struct_mutex); +	diff = (old_sequence - fence->sequence) & driver->sequence_mask; +	read_unlock_irqrestore(&fm->lock, flags); +	if (diff < driver->wrap_diff) { +		drm_fence_object_flush(dev, fence, fence->type); +	} +	drm_fence_usage_deref_unlocked(dev, fence); +} + +EXPORT_SYMBOL(drm_fence_flush_old); + +int drm_fence_object_wait(drm_device_t * dev, +			  volatile drm_fence_object_t * fence, +			  int lazy, int ignore_signals, uint32_t mask) +{ +	drm_fence_manager_t *fm = &dev->fm; +	drm_fence_driver_t *driver = dev->driver->fence_driver; +	int ret = 0; +	unsigned long _end; +	int signaled; + +	if (mask & ~fence->type) { +		DRM_ERROR("Wait trying to extend fence type" +			  " 0x%08x 0x%08x\n", mask, fence->type); +		return -EINVAL; +	} + +	if (fence_signaled(dev, fence, mask, 0)) +		return 0; + +	_end = jiffies + 3 * DRM_HZ; + +	drm_fence_object_flush(dev, fence, mask); + +	if (lazy && driver->lazy_capable) { + +		do { +			DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ, +				    fence_signaled(dev, fence, mask, 1)); +			if (time_after_eq(jiffies, _end)) +				break; +		} while (ret == -EINTR && ignore_signals); +		if (time_after_eq(jiffies, _end) && (ret != 0)) +			ret = -EBUSY; +		if (ret) { +			if (ret == -EBUSY) { +				DRM_ERROR("Fence timeout. " +					  "GPU lockup or fence driver was " +					  "taken down.\n"); +			} +			return ((ret == -EINTR) ? -EAGAIN : ret); +		} +	} else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) && +		   driver->lazy_capable) { + +		/* +		 * We use IRQ wait for EXE fence if available to gain  +		 * CPU in some cases. +		 */ + +		do { +			DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ, +				    fence_signaled(dev, fence, +						   DRM_FENCE_TYPE_EXE, 1)); +			if (time_after_eq(jiffies, _end)) +				break; +		} while (ret == -EINTR && ignore_signals); +		if (time_after_eq(jiffies, _end) && (ret != 0)) +			ret = -EBUSY; +		if (ret) +			return ((ret == -EINTR) ? -EAGAIN : ret); +	} + +	if (fence_signaled(dev, fence, mask, 0)) +		return 0; + +	/* +	 * Avoid kernel-space busy-waits. 
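+	 * If signal delivery is allowed (!ignore_signals), return -EAGAIN
+	 * and let the caller retry instead of polling here.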
+	 */ +#if 1 +	if (!ignore_signals) +		return -EAGAIN; +#endif +	do { +		schedule(); +		signaled = fence_signaled(dev, fence, mask, 1); +	} while (!signaled && !time_after_eq(jiffies, _end)); + +	if (!signaled) +		return -EBUSY; + +	return 0; +} + +int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence, +			  uint32_t fence_flags, uint32_t type) +{ +	drm_fence_manager_t *fm = &dev->fm; +	drm_fence_driver_t *driver = dev->driver->fence_driver; +	unsigned long flags; +	uint32_t sequence; +	uint32_t native_type; +	int ret; + +	drm_fence_unring(dev, &fence->ring); +	ret = driver->emit(dev, fence_flags, &sequence, &native_type); +	if (ret) +		return ret; + +	write_lock_irqsave(&fm->lock, flags); +	fence->type = type; +	fence->flush_mask = 0x00; +	fence->submitted_flush = 0x00; +	fence->signaled = 0x00; +	fence->sequence = sequence; +	fence->native_type = native_type; +	list_add_tail(&fence->ring, &fm->ring); +	write_unlock_irqrestore(&fm->lock, flags); +	return 0; +} + +static int drm_fence_object_init(drm_device_t * dev, uint32_t type, +				 uint32_t fence_flags, +				 drm_fence_object_t * fence) +{ +	int ret = 0; +	unsigned long flags; +	drm_fence_manager_t *fm = &dev->fm; + +	mutex_lock(&dev->struct_mutex); +	atomic_set(&fence->usage, 1); +	mutex_unlock(&dev->struct_mutex); + +	write_lock_irqsave(&fm->lock, flags); +	INIT_LIST_HEAD(&fence->ring); +	fence->class = 0; +	fence->type = type; +	fence->flush_mask = 0; +	fence->submitted_flush = 0; +	fence->signaled = 0; +	fence->sequence = 0; +	write_unlock_irqrestore(&fm->lock, flags); +	if (fence_flags & DRM_FENCE_FLAG_EMIT) { +		ret = drm_fence_object_emit(dev, fence, fence_flags, type); +	} +	return ret; +} + +int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, +			      int shareable) +{ +	drm_device_t *dev = priv->head->dev; +	int ret; + +	mutex_lock(&dev->struct_mutex); +	ret = drm_add_user_object(priv, &fence->base, shareable); +	mutex_unlock(&dev->struct_mutex); +	if (ret) +		return ret; +	fence->base.type = drm_fence_type; +	fence->base.remove = &drm_fence_object_destroy; +	DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key); +	return 0; +} + +EXPORT_SYMBOL(drm_fence_add_user_object); + +int drm_fence_object_create(drm_device_t * dev, uint32_t type, +			    unsigned flags, drm_fence_object_t ** c_fence) +{ +	drm_fence_object_t *fence; +	int ret; +	drm_fence_manager_t *fm = &dev->fm; + +	fence = drm_ctl_cache_alloc(drm_cache.fence_object, +				    sizeof(*fence), GFP_KERNEL); +	if (!fence) +		return -ENOMEM; +	ret = drm_fence_object_init(dev, type, flags, fence); +	if (ret) { +		drm_fence_usage_deref_unlocked(dev, fence); +		return ret; +	} +	*c_fence = fence; +	atomic_inc(&fm->count); + +	return 0; +} + +EXPORT_SYMBOL(drm_fence_object_create); + +void drm_fence_manager_init(drm_device_t * dev) +{ +	drm_fence_manager_t *fm = &dev->fm; +	drm_fence_driver_t *fed = dev->driver->fence_driver; +	int i; + +	fm->lock = RW_LOCK_UNLOCKED; +	write_lock(&fm->lock); +	INIT_LIST_HEAD(&fm->ring); +	fm->pending_flush = 0; +	DRM_INIT_WAITQUEUE(&fm->fence_queue); +	fm->initialized = 0; +	if (fed) { +		fm->initialized = 1; +		atomic_set(&fm->count, 0); +		for (i = 0; i < fed->no_types; ++i) { +			fm->fence_types[i] = &fm->ring; +		} +	} +	write_unlock(&fm->lock); +} + +void drm_fence_manager_takedown(drm_device_t * dev) +{ +} + +drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) +{ +	drm_device_t *dev = priv->head->dev; +	drm_user_object_t *uo; +	drm_fence_object_t *fence; + +	
mutex_lock(&dev->struct_mutex); +	uo = drm_lookup_user_object(priv, handle); +	if (!uo || (uo->type != drm_fence_type)) { +		mutex_unlock(&dev->struct_mutex); +		return NULL; +	} +	fence = drm_user_object_entry(uo, drm_fence_object_t, base); +	atomic_inc(&fence->usage); +	mutex_unlock(&dev->struct_mutex); +	return fence; +} + +int drm_fence_ioctl(DRM_IOCTL_ARGS) +{ +	DRM_DEVICE; +	int ret; +	drm_fence_manager_t *fm = &dev->fm; +	drm_fence_arg_t arg; +	drm_fence_object_t *fence; +	drm_user_object_t *uo; +	unsigned long flags; +	ret = 0; + +	if (!fm->initialized) { +		DRM_ERROR("The DRM driver does not support fencing.\n"); +		return -EINVAL; +	} + +	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); +	switch (arg.op) { +	case drm_fence_create: +		if (arg.flags & DRM_FENCE_FLAG_EMIT) +			LOCK_TEST_WITH_RETURN(dev, filp); +		ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence); +		if (ret) +			return ret; +		ret = drm_fence_add_user_object(priv, fence, +						arg.flags & +						DRM_FENCE_FLAG_SHAREABLE); +		if (ret) { +			drm_fence_usage_deref_unlocked(dev, fence); +			return ret; +		} + +		/* +		 * usage > 0. No need to lock dev->struct_mutex; +		 */ + +		atomic_inc(&fence->usage); +		arg.handle = fence->base.hash.key; +		break; +	case drm_fence_destroy: +		mutex_lock(&dev->struct_mutex); +		uo = drm_lookup_user_object(priv, arg.handle); +		if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { +			mutex_unlock(&dev->struct_mutex); +			return -EINVAL; +		} +		ret = drm_remove_user_object(priv, uo); +		mutex_unlock(&dev->struct_mutex); +		return ret; +	case drm_fence_reference: +		ret = +		    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); +		if (ret) +			return ret; +		fence = drm_lookup_fence_object(priv, arg.handle); +		break; +	case drm_fence_unreference: +		ret = drm_user_object_unref(priv, arg.handle, drm_fence_type); +		return ret; +	case drm_fence_signaled: +		fence = drm_lookup_fence_object(priv, arg.handle); +		if (!fence) +			return -EINVAL; +		break; +	case drm_fence_flush: +		fence = drm_lookup_fence_object(priv, arg.handle); +		if (!fence) +			return -EINVAL; +		ret = drm_fence_object_flush(dev, fence, arg.type); +		break; +	case drm_fence_wait: +		fence = drm_lookup_fence_object(priv, arg.handle); +		if (!fence) +			return -EINVAL; +		ret = +		    drm_fence_object_wait(dev, fence, +					  arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, +					  0, arg.type); +		break; +	case drm_fence_emit: +		LOCK_TEST_WITH_RETURN(dev, filp); +		fence = drm_lookup_fence_object(priv, arg.handle); +		if (!fence) +			return -EINVAL; +		ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type); +		break; +	case drm_fence_buffers: +		if (!dev->bm.initialized) { +			DRM_ERROR("Buffer object manager is not initialized\n"); +			return -EINVAL; +		} +		LOCK_TEST_WITH_RETURN(dev, filp); +		ret = drm_fence_buffer_objects(priv, NULL, arg.flags, +					       NULL, &fence); +		if (ret) +			return ret; +		ret = drm_fence_add_user_object(priv, fence, +						arg.flags & +						DRM_FENCE_FLAG_SHAREABLE); +		if (ret) +			return ret; +		atomic_inc(&fence->usage); +		arg.handle = fence->base.hash.key; +		break; +	default: +		return -EINVAL; +	} +	read_lock_irqsave(&fm->lock, flags); +	arg.class = fence->class; +	arg.type = fence->type; +	arg.signaled = fence->signaled; +	read_unlock_irqrestore(&fm->lock, flags); +	drm_fence_usage_deref_unlocked(dev, fence); + +	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); +	return ret; +} diff --git a/linux-core/drm_fops.c 
b/linux-core/drm_fops.c index 48c77545..b60ced34 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -47,6 +47,7 @@ static int drm_setup(drm_device_t * dev)  	int i;  	int ret; +  	if (dev->driver->firstopen) {  		ret = dev->driver->firstopen(dev);  		if (ret != 0) @@ -56,6 +57,7 @@ static int drm_setup(drm_device_t * dev)  	dev->magicfree.next = NULL;  	/* prebuild the SAREA */ +  	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);  	if (i != 0)  		return i; @@ -156,6 +158,12 @@ int drm_open(struct inode *inode, struct file *filp)  		}  		spin_unlock(&dev->count_lock);  	} +	mutex_lock(&dev->struct_mutex); +	BUG_ON((dev->dev_mapping != NULL) &&  +	       (dev->dev_mapping != inode->i_mapping)); +	if (dev->dev_mapping == NULL) +		dev->dev_mapping = inode->i_mapping; +	mutex_unlock(&dev->struct_mutex);  	return retcode;  } @@ -233,6 +241,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,  	int minor = iminor(inode);  	drm_file_t *priv;  	int ret; +	int i,j;  	if (filp->f_flags & O_EXCL)  		return -EBUSY;	/* No exclusive opens */ @@ -256,6 +265,22 @@ static int drm_open_helper(struct inode *inode, struct file *filp,  	priv->authenticated = capable(CAP_SYS_ADMIN);  	priv->lock_count = 0; +	INIT_LIST_HEAD(&priv->user_objects); +	INIT_LIST_HEAD(&priv->refd_objects); + +	for (i=0; i<_DRM_NO_REF_TYPES; ++i) { +		ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER); +		if (ret) +			break; +	} + +	if (ret) { +		for(j=0; j<i; ++j) { +			drm_ht_remove(&priv->refd_object_hash[j]); +		} +		goto out_free; +	} +  	if (dev->driver->open) {  		ret = dev->driver->open(dev, priv);  		if (ret < 0) @@ -320,6 +345,53 @@ int drm_fasync(int fd, struct file *filp, int on)  }  EXPORT_SYMBOL(drm_fasync); +static void drm_object_release(struct file *filp) { + +        drm_file_t *priv = filp->private_data; +	struct list_head *head; +	drm_user_object_t *user_object; +	drm_ref_object_t *ref_object; +	int i; + +	/* +	 * Free leftover ref objects created by me. Note that we cannot use +	 * list_for_each() here, as the struct_mutex may be temporarily released  +	 * by the remove_() functions, and thus the lists may be altered. +	 * Also, a drm_remove_ref_object() will not remove it +	 * from the list unless its refcount is 1. +	 */ + +	head = &priv->refd_objects;  +	while (head->next != head) { +		ref_object = list_entry(head->next, drm_ref_object_t, list); +		drm_remove_ref_object(priv, ref_object);		 +		head = &priv->refd_objects;  +	} +		 +	/* +	 * Free leftover user objects created by me. +	 */ + +	head = &priv->user_objects;  +	while (head->next != head) { +		user_object = list_entry(head->next, drm_user_object_t, list); +		drm_remove_user_object(priv, user_object);		 +		head = &priv->user_objects;  +	} + + + + +	for(i=0; i<_DRM_NO_REF_TYPES; ++i) { +		drm_ht_remove(&priv->refd_object_hash[i]); +	} +}			 +		 + + + + +  /**   * Release file.   
* @@ -354,58 +426,43 @@ int drm_release(struct inode *inode, struct file *filp)  		  current->pid, (long)old_encode_dev(priv->head->device),  		  dev->open_count); -	if (priv->lock_count && dev->lock.hw_lock && -	    _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && -	    dev->lock.filp == filp) { -		DRM_DEBUG("File %p released, freeing lock for context %d\n", -			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); - -		if (dev->driver->reclaim_buffers_locked) -			dev->driver->reclaim_buffers_locked(dev, filp); +	if (dev->driver->reclaim_buffers_locked) { +	        unsigned long _end = jiffies + DRM_HZ*3; -		drm_lock_free(dev, &dev->lock.hw_lock->lock, -			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); +		do { +			retcode = drm_kernel_take_hw_lock(filp); +		} while(retcode && !time_after_eq(jiffies,_end)); -		/* FIXME: may require heavy-handed reset of -		   hardware at this point, possibly -		   processed via a callback to the X -		   server. */ -	} else if (dev->driver->reclaim_buffers_locked && priv->lock_count -		   && dev->lock.hw_lock) { -		/* The lock is required to reclaim buffers */ -		DECLARE_WAITQUEUE(entry, current); - -		add_wait_queue(&dev->lock.lock_queue, &entry); -		for (;;) { -			__set_current_state(TASK_INTERRUPTIBLE); -			if (!dev->lock.hw_lock) { -				/* Device has been unregistered */ -				retcode = -EINTR; -				break; -			} -			if (drm_lock_take(&dev->lock.hw_lock->lock, -					  DRM_KERNEL_CONTEXT)) { -				dev->lock.filp = filp; -				dev->lock.lock_time = jiffies; -				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); -				break;	/* Got lock */ -			} -			/* Contention */ -			schedule(); -			if (signal_pending(current)) { -				retcode = -ERESTARTSYS; -				break; -			} -		} -		__set_current_state(TASK_RUNNING); -		remove_wait_queue(&dev->lock.lock_queue, &entry);  		if (!retcode) {  			dev->driver->reclaim_buffers_locked(dev, filp); +  			drm_lock_free(dev, &dev->lock.hw_lock->lock, -				      DRM_KERNEL_CONTEXT); +				      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); +		} else { + +			/* +			 * FIXME: This is not a good solution. We should perhaps associate the +			 * DRM lock with a process context, and check whether the current process +			 * holds the lock. Then we can run reclaim buffers locked anyway. +			 */ + +			DRM_ERROR("Reclaim buffers locked deadlock.\n"); +			DRM_ERROR("This is probably a single thread having multiple\n"); +			DRM_ERROR("DRM file descriptors open either dying or " +				  "closing file descriptors\n"); +			DRM_ERROR("while having the lock. 
I will not reclaim buffers.\n"); +			DRM_ERROR("Locking context is 0x%08x\n", +				  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));  		} +	} else if (drm_i_have_hw_lock(filp)) { +		DRM_DEBUG("File %p released, freeing lock for context %d\n", +			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + +		drm_lock_free(dev, &dev->lock.hw_lock->lock, +			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));  	} +  	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&  	    !dev->driver->reclaim_buffers_locked) {  		dev->driver->reclaim_buffers(dev, filp); @@ -414,6 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)  	drm_fasync(-1, filp, 0);  	mutex_lock(&dev->ctxlist_mutex); +  	if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {  		drm_ctx_list_t *pos, *n; @@ -435,6 +493,7 @@ int drm_release(struct inode *inode, struct file *filp)  	mutex_unlock(&dev->ctxlist_mutex);  	mutex_lock(&dev->struct_mutex); +	drm_object_release(filp);  	if (priv->remove_auth_on_close == 1) {  		drm_file_t *temp = dev->file_first;  		while (temp) { diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c index a0b2d680..6f17e114 100644 --- a/linux-core/drm_hashtab.c +++ b/linux-core/drm_hashtab.c @@ -36,25 +36,34 @@  #include "drm_hashtab.h"  #include <linux/hash.h> -int drm_ht_create(drm_open_hash_t *ht, unsigned int order) +int drm_ht_create(drm_open_hash_t * ht, unsigned int order)  {  	unsigned int i;  	ht->size = 1 << order;  	ht->order = order;  	ht->fill = 0; -	ht->table = vmalloc(ht->size*sizeof(*ht->table)); +	ht->table = NULL; +	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE); +	if (!ht->use_vmalloc) { +		ht->table = drm_calloc(ht->size, sizeof(*ht->table), +				       DRM_MEM_HASHTAB); +	} +	if (!ht->table) { +		ht->use_vmalloc = 1; +		ht->table = vmalloc(ht->size * sizeof(*ht->table)); +	}  	if (!ht->table) {  		DRM_ERROR("Out of memory for hash table\n");  		return -ENOMEM;  	} -	for (i=0; i< ht->size; ++i) { +	for (i = 0; i < ht->size; ++i) {  		INIT_HLIST_HEAD(&ht->table[i]);  	}  	return 0;  } -void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key) +void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key)  {  	drm_hash_item_t *entry;  	struct hlist_head *h_list; @@ -71,7 +80,7 @@ void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)  	}  } -static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,  +static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,  					  unsigned long key)  {  	drm_hash_item_t *entry; @@ -91,8 +100,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,  	return NULL;  } - -int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item) +int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)  {  	drm_hash_item_t *entry;  	struct hlist_head *h_list; @@ -123,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)   * Just insert an item and return any "bits" bit key that hasn't been    * used before.   
*/ -int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item, +int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item,  			      unsigned long seed, int bits, int shift,  			      unsigned long add)  { @@ -138,7 +146,7 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,  		ret = drm_ht_insert_item(ht, item);  		if (ret)  			unshifted_key = (unshifted_key + 1) & mask; -	} while(ret && (unshifted_key != first)); +	} while (ret && (unshifted_key != first));  	if (ret) {  		DRM_ERROR("Available key bit space exhausted\n"); @@ -147,8 +155,8 @@ int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,  	return 0;  } -int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key, -		     drm_hash_item_t **item) +int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key, +		     drm_hash_item_t ** item)  {  	struct hlist_node *list; @@ -160,7 +168,7 @@ int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,  	return 0;  } -int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key) +int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key)  {  	struct hlist_node *list; @@ -173,18 +181,21 @@ int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)  	return -EINVAL;  } -int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item) +int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item)  {  	hlist_del_init(&item->head);  	ht->fill--;  	return 0;  } -void drm_ht_remove(drm_open_hash_t *ht) +void drm_ht_remove(drm_open_hash_t * ht)  {  	if (ht->table) { -		vfree(ht->table); +		if (ht->use_vmalloc) +			vfree(ht->table); +		else +			drm_free(ht->table, ht->size * sizeof(*ht->table), +				 DRM_MEM_HASHTAB);  		ht->table = NULL;  	}  } - diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h index 40afec05..613091c9 100644 --- a/linux-core/drm_hashtab.h +++ b/linux-core/drm_hashtab.h @@ -47,6 +47,7 @@ typedef struct drm_open_hash{  	unsigned int order;  	unsigned int fill;  	struct hlist_head *table; +	int use_vmalloc;  } drm_open_hash_t; diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 4d8e4a25..c365c08e 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -118,6 +118,7 @@ static int drm_irq_install(drm_device_t * dev)  		init_waitqueue_head(&dev->vbl_queue);  		spin_lock_init(&dev->vbl_lock); +		spin_lock_init(&dev->tasklet_lock);  		INIT_LIST_HEAD(&dev->vbl_sigs.head);  		INIT_LIST_HEAD(&dev->vbl_sigs2.head); diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index d1b85a15..d11c570e 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -35,9 +35,12 @@  #include "drmP.h" +#if 0  static int drm_lock_transfer(drm_device_t * dev,  			     __volatile__ unsigned int *lock,  			     unsigned int context); +#endif +  static int drm_notifier(void *priv);  /** @@ -181,12 +184,9 @@ int drm_unlock(struct inode *inode, struct file *filp,  	if (dev->driver->kernel_context_switch_unlock)  		dev->driver->kernel_context_switch_unlock(dev);  	else { -		drm_lock_transfer(dev, &dev->lock.hw_lock->lock, -				  DRM_KERNEL_CONTEXT); -  		if (drm_lock_free(dev, &dev->lock.hw_lock->lock, -				  DRM_KERNEL_CONTEXT)) { -			DRM_ERROR("\n"); +				  lock.context)) { +			/* FIXME: Should really bail out here. 
*/  		}  	} @@ -212,7 +212,7 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)  		if (old & _DRM_LOCK_HELD)  			new = old | _DRM_LOCK_CONT;  		else -			new = context | _DRM_LOCK_HELD; +			new = context | _DRM_LOCK_HELD | _DRM_LOCK_CONT;  		prev = cmpxchg(lock, old, new);  	} while (prev != old);  	if (_DRM_LOCKING_CONTEXT(old) == context) { @@ -224,13 +224,14 @@ int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)  			return 0;  		}  	} -	if (new == (context | _DRM_LOCK_HELD)) { +	if (new == (context | _DRM_LOCK_HELD | _DRM_LOCK_CONT)) {  		/* Have lock */  		return 1;  	}  	return 0;  } +#if 0  /**   * This takes a lock forcibly and hands it to context.	Should ONLY be used   * inside *_unlock to give lock to kernel before calling *_dma_schedule. @@ -257,6 +258,7 @@ static int drm_lock_transfer(drm_device_t * dev,  	} while (prev != old);  	return 1;  } +#endif  /**   * Free lock. @@ -274,12 +276,12 @@ int drm_lock_free(drm_device_t * dev,  {  	unsigned int old, new, prev; -	dev->lock.filp = NULL;  	do {  		old = *lock; -		new = 0; +		new = _DRM_LOCKING_CONTEXT(old);  		prev = cmpxchg(lock, old, new);  	} while (prev != old); +  	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {  		DRM_ERROR("%d freed heavyweight lock held by %d\n",  			  context, _DRM_LOCKING_CONTEXT(old)); @@ -319,3 +321,66 @@ static int drm_notifier(void *priv)  	} while (prev != old);  	return 0;  } + +/* + * Can be used by drivers to take the hardware lock if necessary. + * (Waiting for idle before reclaiming buffers etc.) + */ + +int drm_i_have_hw_lock(struct file *filp) +{ +	DRM_DEVICE; + +	return (priv->lock_count && dev->lock.hw_lock && +		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && +		dev->lock.filp == filp); +} + +EXPORT_SYMBOL(drm_i_have_hw_lock); + +int drm_kernel_take_hw_lock(struct file *filp) +{ +	DRM_DEVICE; + +	int ret = 0;  +	unsigned long _end = jiffies + 3*DRM_HZ; +	 +	if (!drm_i_have_hw_lock(filp)) { +	 +		DECLARE_WAITQUEUE(entry, current); + +		add_wait_queue(&dev->lock.lock_queue, &entry); +		for (;;) { +			__set_current_state(TASK_INTERRUPTIBLE); +			if (!dev->lock.hw_lock) { +				/* Device has been unregistered */ +				ret = -EINTR; +				break; +			} +			if (drm_lock_take(&dev->lock.hw_lock->lock, +					  DRM_KERNEL_CONTEXT)) { +				dev->lock.filp = filp; +				dev->lock.lock_time = jiffies; +				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); +				break;	/* Got lock */ +			} +			/* Contention */ +			if (time_after_eq(jiffies,_end)) { +			        ret = -EBUSY; +				break; +			} + +			schedule_timeout(1); +			if (signal_pending(current)) { +				ret = -ERESTARTSYS; +				break; +			} +		} +		__set_current_state(TASK_RUNNING); +		remove_wait_queue(&dev->lock.lock_queue, &entry); +	} +	return ret; +} + +EXPORT_SYMBOL(drm_kernel_take_hw_lock); + diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index a249382d..3370c279 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -36,6 +36,75 @@  #include <linux/highmem.h>  #include "drmP.h" +static struct { +	spinlock_t lock; +	drm_u64_t cur_used; +	drm_u64_t low_threshold; +	drm_u64_t high_threshold; +} drm_memctl = { +	.lock = SPIN_LOCK_UNLOCKED +}; + +static inline size_t drm_size_align(size_t size) { + +	register size_t tmpSize = 4; +	if (size > PAGE_SIZE) +		return PAGE_ALIGN(size); + +	while(tmpSize < size) +		tmpSize <<= 1; + +	return (size_t) tmpSize; +} + +int drm_alloc_memctl(size_t size) +{ +	int ret; +	unsigned long a_size = drm_size_align(size); +  +	
spin_lock(&drm_memctl.lock); +	ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?  +		-ENOMEM : 0; +	if (!ret)  +		drm_memctl.cur_used += a_size; +	spin_unlock(&drm_memctl.lock); +	return ret; +} +EXPORT_SYMBOL(drm_alloc_memctl); +	 +void drm_free_memctl(size_t size) +{ +	unsigned long a_size = drm_size_align(size); + +	spin_lock(&drm_memctl.lock); +	drm_memctl.cur_used -= a_size; +	spin_unlock(&drm_memctl.lock); +} +EXPORT_SYMBOL(drm_free_memctl); + +void drm_query_memctl(drm_u64_t *cur_used, +		      drm_u64_t *low_threshold, +		      drm_u64_t *high_threshold)  +{ +	spin_lock(&drm_memctl.lock); +	*cur_used = drm_memctl.cur_used; +	*low_threshold = drm_memctl.low_threshold; +	*high_threshold = drm_memctl.high_threshold; +	spin_unlock(&drm_memctl.lock); +}	 +EXPORT_SYMBOL(drm_query_memctl); + +void drm_init_memctl(size_t p_low_threshold, +		     size_t p_high_threshold) +{ +	spin_lock(&drm_memctl.lock); +	drm_memctl.cur_used = 0; +	drm_memctl.low_threshold = p_low_threshold << PAGE_SHIFT; +	drm_memctl.high_threshold = p_high_threshold << PAGE_SHIFT; +	spin_unlock(&drm_memctl.lock); +} + +  #ifndef DEBUG_MEMORY  /** No-op. */ diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c index 617526bd..4af33bde 100644 --- a/linux-core/drm_mm.c +++ b/linux-core/drm_mm.c @@ -42,6 +42,7 @@   */  #include "drmP.h" +#include <linux/slab.h>  drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,  				unsigned long size, unsigned alignment) @@ -57,7 +58,10 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,  		parent->free = 0;  		return parent;  	} else { -		child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM); + +		child = (drm_mm_node_t *) +		    drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), +					GFP_KERNEL);  		if (!child)  			return NULL; @@ -67,6 +71,7 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,  		child->free = 0;  		child->size = size;  		child->start = parent->start; +		child->mm = parent->mm;  		list_add_tail(&child->ml_entry, &parent->ml_entry);  		parent->size -= size; @@ -80,9 +85,10 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,   * Otherwise add to the free stack.   
*/ -void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur) +void drm_mm_put_block(drm_mm_node_t * cur)  { +	drm_mm_t *mm = cur->mm;  	drm_mm_node_t *list_root = &mm->root_node;  	struct list_head *cur_head = &cur->ml_entry;  	struct list_head *root_head = &list_root->ml_entry; @@ -105,8 +111,9 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)  				prev_node->size += next_node->size;  				list_del(&next_node->ml_entry);  				list_del(&next_node->fl_entry); -				drm_free(next_node, sizeof(*next_node), -					 DRM_MEM_MM); +				drm_ctl_cache_free(drm_cache.mm, +						   sizeof(*next_node), +						   next_node);  			} else {  				next_node->size += cur->size;  				next_node->start = cur->start; @@ -119,7 +126,7 @@ void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)  		list_add(&cur->fl_entry, &list_root->fl_entry);  	} else {  		list_del(&cur->ml_entry); -		drm_free(cur, sizeof(*cur), DRM_MEM_MM); +		drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);  	}  } @@ -154,13 +161,23 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,  	return best;  } +int drm_mm_clean(drm_mm_t * mm) +{ +	struct list_head *head = &mm->root_node.ml_entry; + +	return (head->next->next == head); +} +  int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)  {  	drm_mm_node_t *child;  	INIT_LIST_HEAD(&mm->root_node.ml_entry);  	INIT_LIST_HEAD(&mm->root_node.fl_entry); -	child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM); + +	child = (drm_mm_node_t *) +	    drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), GFP_KERNEL); +  	if (!child)  		return -ENOMEM; @@ -170,6 +187,7 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)  	child->start = start;  	child->size = size;  	child->free = 1; +	child->mm = mm;  	list_add(&child->fl_entry, &mm->root_node.fl_entry);  	list_add(&child->ml_entry, &mm->root_node.ml_entry); @@ -194,8 +212,7 @@ void drm_mm_takedown(drm_mm_t * mm)  	list_del(&entry->fl_entry);  	list_del(&entry->ml_entry); - -	drm_free(entry, sizeof(*entry), DRM_MEM_MM); +	drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);  }  EXPORT_SYMBOL(drm_mm_takedown); diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c new file mode 100644 index 00000000..0157329c --- /dev/null +++ b/linux-core/drm_object.c @@ -0,0 +1,287 @@ +/************************************************************************** + *  + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + *  + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + *  + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + *  + *  + **************************************************************************/ + +#include "drmP.h" + +int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, +			int shareable) +{ +	drm_device_t *dev = priv->head->dev; +	int ret; + +	atomic_set(&item->refcount, 1); +	item->shareable = shareable; +	item->owner = priv; + +	ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash, +					(unsigned long)item, 32, 0, 0); +	if (ret) +		return ret; + +	list_add_tail(&item->list, &priv->user_objects); +	return 0; +} + +drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) +{ +	drm_device_t *dev = priv->head->dev; +	drm_hash_item_t *hash; +	int ret; +	drm_user_object_t *item; + +	ret = drm_ht_find_item(&dev->object_hash, key, &hash); +	if (ret) { +		return NULL; +	} +	item = drm_hash_entry(hash, drm_user_object_t, hash); + +	if (priv != item->owner) { +		drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE]; +		ret = drm_ht_find_item(ht, (unsigned long)item, &hash); +		if (ret) { +			DRM_ERROR("Object not registered for usage\n"); +			return NULL; +		} +	} +	return item; +} + +static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) +{ +	drm_device_t *dev = priv->head->dev; +	int ret; + +	if (atomic_dec_and_test(&item->refcount)) { +		ret = drm_ht_remove_item(&dev->object_hash, &item->hash); +		BUG_ON(ret); +		list_del_init(&item->list); +		item->remove(priv, item); +	} +} + +int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) +{ +	if (item->owner != priv) { +		DRM_ERROR("Cannot destroy object not owned by you.\n"); +		return -EINVAL; +	} +	item->owner = 0; +	item->shareable = 0; +	list_del_init(&item->list); +	drm_deref_user_object(priv, item); +	return 0; +} + +static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro, +				 drm_ref_t action) +{ +	int ret = 0; + +	switch (action) { +	case _DRM_REF_USE: +		atomic_inc(&ro->refcount); +		break; +	default: +		if (!ro->ref_struct_locked) { +			break; +		} else { +			ro->ref_struct_locked(priv, ro, action); +		} +	} +	return ret; +} + +int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, +		       drm_ref_t ref_action) +{ +	int ret = 0; +	drm_ref_object_t *item; +	drm_open_hash_t *ht = &priv->refd_object_hash[ref_action]; + +	if (!referenced_object->shareable && priv != referenced_object->owner) { +		DRM_ERROR("Not allowed to reference this object\n"); +		return -EINVAL; +	} + +	/* +	 * If this is not a usage reference, Check that usage has been registered +	 * first. Otherwise strange things may happen on destruction. 
+	 */ + +	if ((ref_action != _DRM_REF_USE) && priv != referenced_object->owner) { +		item = +		    drm_lookup_ref_object(priv, referenced_object, +					  _DRM_REF_USE); +		if (!item) { +			DRM_ERROR +			    ("Object not registered for usage by this client\n"); +			return -EINVAL; +		} +	} + +	if (NULL != +	    (item = +	     drm_lookup_ref_object(priv, referenced_object, ref_action))) { +		atomic_inc(&item->refcount); +		return drm_object_ref_action(priv, referenced_object, +					     ref_action); +	} + +	item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS); +	if (item == NULL) { +		DRM_ERROR("Could not allocate reference object\n"); +		return -ENOMEM; +	} + +	atomic_set(&item->refcount, 1); +	item->hash.key = (unsigned long)referenced_object; +	ret = drm_ht_insert_item(ht, &item->hash); +	item->unref_action = ref_action; + +	if (ret) +		goto out; + +	list_add(&item->list, &priv->refd_objects); +	ret = drm_object_ref_action(priv, referenced_object, ref_action); +      out: +	return ret; +} + +drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, +					drm_user_object_t * referenced_object, +					drm_ref_t ref_action) +{ +	drm_hash_item_t *hash; +	int ret; + +	ret = drm_ht_find_item(&priv->refd_object_hash[ref_action], +			       (unsigned long)referenced_object, &hash); +	if (ret) +		return NULL; + +	return drm_hash_entry(hash, drm_ref_object_t, hash); +} + +static void drm_remove_other_references(drm_file_t * priv, +					drm_user_object_t * ro) +{ +	int i; +	drm_open_hash_t *ht; +	drm_hash_item_t *hash; +	drm_ref_object_t *item; + +	for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) { +		ht = &priv->refd_object_hash[i]; +		while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) { +			item = drm_hash_entry(hash, drm_ref_object_t, hash); +			drm_remove_ref_object(priv, item); +		} +	} +} + +void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) +{ +	int ret; +	drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key; +	drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action]; +	drm_ref_t unref_action; + +	unref_action = item->unref_action; +	if (atomic_dec_and_test(&item->refcount)) { +		ret = drm_ht_remove_item(ht, &item->hash); +		BUG_ON(ret); +		list_del_init(&item->list); +		if (unref_action == _DRM_REF_USE) +			drm_remove_other_references(priv, user_object); +		drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS); +	} + +	switch (unref_action) { +	case _DRM_REF_USE: +		drm_deref_user_object(priv, user_object); +		break; +	default: +		BUG_ON(!user_object->unref); +		user_object->unref(priv, user_object, unref_action); +		break; +	} + +} + +int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, +			drm_object_type_t type, drm_user_object_t ** object) +{ +	drm_device_t *dev = priv->head->dev; +	drm_user_object_t *uo; +	int ret; + +	mutex_lock(&dev->struct_mutex); +	uo = drm_lookup_user_object(priv, user_token); +	if (!uo || (uo->type != type)) { +		ret = -EINVAL; +		goto out_err; +	} +	ret = drm_add_ref_object(priv, uo, _DRM_REF_USE); +	if (ret) +		goto out_err; +	mutex_unlock(&dev->struct_mutex); +	*object = uo; +	DRM_ERROR("Referenced an object\n"); +	return 0; +      out_err: +	mutex_unlock(&dev->struct_mutex); +	return ret; +} + +int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, +			  drm_object_type_t type) +{ +	drm_device_t *dev = priv->head->dev; +	drm_user_object_t *uo; +	drm_ref_object_t *ro; +	int ret; + +	mutex_lock(&dev->struct_mutex); +	uo = drm_lookup_user_object(priv, user_token); +	if (!uo || 
(uo->type != type)) { +		ret = -EINVAL; +		goto out_err; +	} +	ro = drm_lookup_ref_object(priv, uo, _DRM_REF_USE); +	if (!ro) { +		ret = -EINVAL; +		goto out_err; +	} +	drm_remove_ref_object(priv, ro); +	mutex_unlock(&dev->struct_mutex); +	DRM_ERROR("Unreferenced an object\n"); +	return 0; +      out_err: +	mutex_unlock(&dev->struct_mutex); +	return ret; +} diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index 512a8f75..863cacfc 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -49,6 +49,8 @@ static int drm_queues_info(char *buf, char **start, off_t offset,  			   int request, int *eof, void *data);  static int drm_bufs_info(char *buf, char **start, off_t offset,  			 int request, int *eof, void *data); +static int drm_objects_info(char *buf, char **start, off_t offset, +			 int request, int *eof, void *data);  #if DRM_DEBUG_CODE  static int drm_vma_info(char *buf, char **start, off_t offset,  			int request, int *eof, void *data); @@ -67,6 +69,7 @@ static struct drm_proc_list {  	{"clients", drm_clients_info},  	{"queues", drm_queues_info},  	{"bufs", drm_bufs_info}, +	{"objects", drm_objects_info},  #if DRM_DEBUG_CODE  	{"vma", drm_vma_info},  #endif @@ -238,10 +241,11 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,  			type = "??";  		else  			type = types[map->type]; -		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08x ", +		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",  			       i,  			       map->offset, -			       map->size, type, map->flags, r_list->user_token); +			       map->size, type, map->flags,  +			       (unsigned long) r_list->user_token);  		if (map->mtrr < 0) {  			DRM_PROC_PRINT("none\n"); @@ -418,6 +422,89 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,  }  /** + * Called when "/proc/dri/.../objects" is read. + * + * \param buf output buffer. + * \param start start of output data. + * \param offset requested start offset. + * \param request requested number of bytes. + * \param eof whether there is no more data to return. + * \param data private data. + * \return number of written bytes. 
+ */ +static int drm__objects_info(char *buf, char **start, off_t offset, int request, +			  int *eof, void *data) +{ +	drm_device_t *dev = (drm_device_t *) data; +	int len = 0; +	drm_buffer_manager_t *bm = &dev->bm; +	drm_fence_manager_t *fm = &dev->fm;  +	drm_u64_t used_mem; +	drm_u64_t low_mem; +	drm_u64_t high_mem; + + +	if (offset > DRM_PROC_LIMIT) { +		*eof = 1; +		return 0; +	} + +	*start = &buf[offset]; +	*eof = 0; +	 +	if (fm->initialized) { +		DRM_PROC_PRINT("Number of active fence objects: %d.\n\n",  +			       atomic_read(&fm->count)); +	} else { +		DRM_PROC_PRINT("Fence objects are not supported by this driver\n\n"); +	} + +	if (bm->initialized) { +		DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",  +			       atomic_read(&bm->count)); +		DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages); +	} else { +		DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n"); +	} + +	drm_query_memctl(&used_mem, &low_mem, &high_mem); + +	if (used_mem > 16*PAGE_SIZE) {  +		DRM_PROC_PRINT("Used object memory is %lu pages.\n",  +			       (unsigned long) (used_mem >> PAGE_SHIFT)); +	} else { +		DRM_PROC_PRINT("Used object memory is %lu bytes.\n",  +			       (unsigned long) used_mem); +	} +	DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",  +		       (unsigned long) (low_mem >> PAGE_SHIFT)); +	DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",  +		       (unsigned long) (high_mem >> PAGE_SHIFT)); + +	DRM_PROC_PRINT("\n"); + +	if (len > request + offset) +		return request; +	*eof = 1; +	return len - offset; +} + +/** + * Simply calls _objects_info() while holding the drm_device::struct_mutex lock. + */ +static int drm_objects_info(char *buf, char **start, off_t offset, int request, +			 int *eof, void *data) +{ +	drm_device_t *dev = (drm_device_t *) data; +	int ret; + +	mutex_lock(&dev->struct_mutex); +	ret = drm__objects_info(buf, start, offset, request, eof, data); +	mutex_unlock(&dev->struct_mutex); +	return ret; +} + +/**   * Called when "/proc/dri/.../clients" is read.   *   * \param buf output buffer. @@ -500,7 +587,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,  	for (pt = dev->vmalist; pt; pt = pt->next) {  		if (!(vma = pt->vma))  			continue; -		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx", +		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",  			       pt->pid,  			       vma->vm_start,  			       vma->vm_end, @@ -510,7 +597,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,  			       vma->vm_flags & VM_MAYSHARE ? 's' : 'p',  			       vma->vm_flags & VM_LOCKED ? 'l' : '-',  			       vma->vm_flags & VM_IO ? 
'i' : '-', -			       VM_OFFSET(vma)); +			       vma->vm_pgoff);  #if defined(__i386__)  		pgprot = pgprot_val(vma->vm_page_prot); diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index 425c8233..19a13f3a 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -101,10 +101,9 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size,  static void drm_sman_mm_free(void *private, void *ref)  { -	drm_mm_t *mm = (drm_mm_t *) private;  	drm_mm_node_t *node = (drm_mm_node_t *) ref; -	drm_mm_put_block(mm, node); +	drm_mm_put_block(node);  }  static void drm_sman_mm_destroy(void *private) diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 839cf441..c03a56a1 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -54,6 +54,11 @@ drm_head_t **drm_heads;  struct drm_sysfs_class *drm_class;  struct proc_dir_entry *drm_proc_root; +drm_cache_t drm_cache = +{ .mm = NULL, +  .fence_object = NULL +}; +  static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,  		       const struct pci_device_id *ent,  		       struct drm_driver *driver) @@ -66,6 +71,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,  	init_timer(&dev->timer);  	mutex_init(&dev->struct_mutex);  	mutex_init(&dev->ctxlist_mutex); +	mutex_init(&dev->bm.init_mutex);  	dev->pdev = pdev;  	dev->pci_device = pdev->device; @@ -76,15 +82,29 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,  #endif  	dev->irq = pdev->irq; -	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS); -	if (dev->maplist == NULL) +	if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) { +		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);  		return -ENOMEM; -	INIT_LIST_HEAD(&dev->maplist->head); -	if (drm_ht_create(&dev->map_hash, 12)) { +	} +	if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,  +			DRM_FILE_PAGE_OFFSET_SIZE)) {  		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS); +		drm_ht_remove(&dev->map_hash); +		return -ENOMEM; +	} + +	if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) { +                drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS); +		drm_ht_remove(&dev->map_hash); +		drm_mm_takedown(&dev->offset_manager);  		return -ENOMEM;  	} +	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS); +	if (dev->maplist == NULL) +		return -ENOMEM; +	INIT_LIST_HEAD(&dev->maplist->head); +  	/* the DRM has 6 counters */  	dev->counters = 6;  	dev->types[0] = _DRM_STAT_LOCK; @@ -125,6 +145,7 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,  		goto error_out_unreg;  	} +	drm_fence_manager_init(dev);  	return 0;  error_out_unreg: diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c new file mode 100644 index 00000000..599589fc --- /dev/null +++ b/linux-core/drm_ttm.c @@ -0,0 +1,498 @@ +/************************************************************************** + *  + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. 
+ *  + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + *  + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + *  + *  + **************************************************************************/ + +#include "drmP.h" + +/* + * Use kmalloc if possible. Otherwise fall back to vmalloc. + */ + +static void *ttm_alloc(unsigned long size, int type) +{ +	void *ret = NULL; + +	if (drm_alloc_memctl(size)) +		return NULL; +	if (size <= PAGE_SIZE) { +		ret = drm_alloc(size, type); +	} +	if (!ret) { +		ret = vmalloc(size); +	} +	if (!ret) { +		drm_free_memctl(size); +	} +	return ret; +} + +static void ttm_free(void *pointer, unsigned long size, int type) +{ + +	if ((unsigned long)pointer >= VMALLOC_START && +	    (unsigned long)pointer <= VMALLOC_END) { +		vfree(pointer); +	} else { +		drm_free(pointer, size, type); +	} +	drm_free_memctl(size); +} + +/* + * Unmap all vma pages from vmas mapping this ttm. + */ + +static int unmap_vma_pages(drm_ttm_t * ttm) +{ +	drm_device_t *dev = ttm->dev; +	loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT; +	loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT; + +#ifdef DRM_ODD_MM_COMPAT +	int ret; +	ret = drm_ttm_lock_mm(ttm); +	if (ret) +		return ret; +#endif +	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); +#ifdef DRM_ODD_MM_COMPAT +	drm_ttm_finish_unmap(ttm); +#endif +	return 0; +} + +/* + * Change caching policy for the linear kernel map  + * for range of pages in a ttm. + */ + +static int drm_set_caching(drm_ttm_t * ttm, int noncached) +{ +	int i; +	struct page **cur_page; +	int do_tlbflush = 0; + +	if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached) +		return 0; + +	for (i = 0; i < ttm->num_pages; ++i) { +		cur_page = ttm->pages + i; +		if (*cur_page) { +			if (!PageHighMem(*cur_page)) { +				if (noncached) { +					map_page_into_agp(*cur_page); +				} else { +					unmap_page_from_agp(*cur_page); +				} +				do_tlbflush = 1; +			} +		} +	} +	if (do_tlbflush) +		flush_agp_mappings(); + +	DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached); + +	return 0; +} + +/* + * Free all resources associated with a ttm. + */ + +int drm_destroy_ttm(drm_ttm_t * ttm) +{ + +	int i; +	struct page **cur_page; +	drm_ttm_backend_t *be; + +	if (!ttm) +		return 0; + +	if (atomic_read(&ttm->vma_count) > 0) { +		ttm->destroy = 1; +		DRM_ERROR("VMAs are still alive. 
Skipping destruction.\n"); +		return -EBUSY; +	} + +	DRM_DEBUG("Destroying a ttm\n"); + +#ifdef DRM_TTM_ODD_COMPAT +	BUG_ON(!list_empty(&ttm->vma_list)); +	BUG_ON(!list_empty(&ttm->p_mm_list)); +#endif +	be = ttm->be; +	if (be) { +		be->destroy(be); +		ttm->be = NULL; +	} + +	if (ttm->pages) { +		drm_buffer_manager_t *bm = &ttm->dev->bm; +		if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) +			drm_set_caching(ttm, 0); + +		for (i = 0; i < ttm->num_pages; ++i) { +			cur_page = ttm->pages + i; +			if (*cur_page) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) +				unlock_page(*cur_page); +#else +				ClearPageReserved(*cur_page); +#endif +				if (page_count(*cur_page) != 1) { +					DRM_ERROR("Erroneous page count. " +						  "Leaking pages.\n"); +				} +				if (page_mapped(*cur_page)) { +					DRM_ERROR("Erroneous map count. " +						  "Leaking page mappings.\n"); +				} + +				/* +				 * End debugging. +				 */ + +				drm_free_gatt_pages(*cur_page, 0); +				drm_free_memctl(PAGE_SIZE); +				--bm->cur_pages; +			} +		} +		ttm_free(ttm->pages, ttm->num_pages * sizeof(*ttm->pages), +			 DRM_MEM_TTM); +		ttm->pages = NULL; +	} + +	drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM); +	return 0; +} + +static int drm_ttm_populate(drm_ttm_t * ttm) +{ +	struct page *page; +	unsigned long i; +	drm_buffer_manager_t *bm; +	drm_ttm_backend_t *be; + +	if (ttm->state != ttm_unpopulated) +		return 0; + +	bm = &ttm->dev->bm; +	be = ttm->be; +	for (i = 0; i < ttm->num_pages; ++i) { +		page = ttm->pages[i]; +		if (!page) { +			if (drm_alloc_memctl(PAGE_SIZE)) { +				return -ENOMEM; +			} +			page = drm_alloc_gatt_pages(0); +			if (!page) { +				drm_free_memctl(PAGE_SIZE); +				return -ENOMEM; +			} +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) +			SetPageLocked(page); +#else +			SetPageReserved(page); +#endif +			ttm->pages[i] = page; +			++bm->cur_pages; +		} +	} +	be->populate(be, ttm->num_pages, ttm->pages); +	ttm->state = ttm_unbound; +	return 0; +} + +/* + * Initialize a ttm. + */ + +static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size) +{ +	drm_bo_driver_t *bo_driver = dev->driver->bo_driver; +	drm_ttm_t *ttm; + +	if (!bo_driver) +		return NULL; + +	ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM); +	if (!ttm) +		return NULL; + +#ifdef DRM_ODD_MM_COMPAT +	INIT_LIST_HEAD(&ttm->p_mm_list); +	INIT_LIST_HEAD(&ttm->vma_list); +#endif + +	ttm->dev = dev; +	atomic_set(&ttm->vma_count, 0); + +	ttm->destroy = 0; +	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + +	ttm->page_flags = 0; + +	/* +	 * Account also for AGP module memory usage. +	 */ + +	ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages), +			       DRM_MEM_TTM); +	if (!ttm->pages) { +		drm_destroy_ttm(ttm); +		DRM_ERROR("Failed allocating page table\n"); +		return NULL; +	} +	memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages)); +	ttm->be = bo_driver->create_ttm_backend_entry(dev); +	if (!ttm->be) { +		drm_destroy_ttm(ttm); +		DRM_ERROR("Failed creating ttm backend entry\n"); +		return NULL; +	} +	ttm->state = ttm_unpopulated; +	return ttm; +} + +/* + * Unbind a ttm region from the aperture. 
+ */ + +int drm_evict_ttm(drm_ttm_t * ttm) +{ +	drm_ttm_backend_t *be = ttm->be; +	int ret; + +	switch (ttm->state) { +	case ttm_bound: +		if (be->needs_ub_cache_adjust(be)) { +			ret = unmap_vma_pages(ttm); +			if (ret) { +				return ret; +			} +		} +		be->unbind(be); +		break; +	default: +		break; +	} +	ttm->state = ttm_evicted; +	return 0; +} + +void drm_fixup_ttm_caching(drm_ttm_t * ttm) +{ + +	if (ttm->state == ttm_evicted) { +		drm_ttm_backend_t *be = ttm->be; +		if (be->needs_ub_cache_adjust(be)) { +			drm_set_caching(ttm, 0); +		} +		ttm->state = ttm_unbound; +	} +} + +int drm_unbind_ttm(drm_ttm_t * ttm) +{ +	int ret = 0; + +	if (ttm->state == ttm_bound) +		ret = drm_evict_ttm(ttm); + +	if (ret) +		return ret; + +	drm_fixup_ttm_caching(ttm); +	return 0; +} + +int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) +{ + +	int ret = 0; +	drm_ttm_backend_t *be; + +	if (!ttm) +		return -EINVAL; +	if (ttm->state == ttm_bound) +		return 0; + +	be = ttm->be; + +	ret = drm_ttm_populate(ttm); +	if (ret) +		return ret; +	if (ttm->state == ttm_unbound && !cached) { +		ret = unmap_vma_pages(ttm); +		if (ret) +			return ret; + +		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); +	} +#ifdef DRM_ODD_MM_COMPAT +	else if (ttm->state == ttm_evicted && !cached) { +		ret = drm_ttm_lock_mm(ttm); +		if (ret) +			return ret; +	} +#endif +	if ((ret = be->bind(be, aper_offset, cached))) { +		ttm->state = ttm_evicted; +#ifdef DRM_ODD_MM_COMPAT +		if (be->needs_ub_cache_adjust(be)) +			drm_ttm_unlock_mm(ttm); +#endif +		DRM_ERROR("Couldn't bind backend.\n"); +		return ret; +	} + +	ttm->aper_offset = aper_offset; +	ttm->state = ttm_bound; + +#ifdef DRM_ODD_MM_COMPAT +	if (be->needs_ub_cache_adjust(be)) { +		ret = drm_ttm_remap_bound(ttm); +		if (ret) +			return ret; +	} +#endif + +	return 0; +} + +/* + * dev->struct_mutex locked. + */ +static void drm_ttm_object_remove(drm_device_t * dev, drm_ttm_object_t * object) +{ +	drm_map_list_t *list = &object->map_list; +	drm_local_map_t *map; + +	if (list->user_token) +		drm_ht_remove_item(&dev->map_hash, &list->hash); + +	if (list->file_offset_node) { +		drm_mm_put_block(list->file_offset_node); +		list->file_offset_node = NULL; +	} + +	map = list->map; + +	if (map) { +		drm_ttm_t *ttm = (drm_ttm_t *) map->offset; +		if (ttm) { +			if (drm_destroy_ttm(ttm) != -EBUSY) { +				drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM); +			} +		} else { +			drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM); +		} +	} + +	drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM); +} + +void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to) +{ +	if (atomic_dec_and_test(&to->usage)) { +		drm_ttm_object_remove(dev, to); +	} +} + +void drm_ttm_object_deref_unlocked(drm_device_t * dev, drm_ttm_object_t * to) +{ +	if (atomic_dec_and_test(&to->usage)) { +		mutex_lock(&dev->struct_mutex); +		if (atomic_read(&to->usage) == 0) +			drm_ttm_object_remove(dev, to); +		mutex_unlock(&dev->struct_mutex); +	} +} + +/* + * Create a ttm and add it to the drm book-keeping.  + * dev->struct_mutex locked. 
+ */ + +int drm_ttm_object_create(drm_device_t * dev, unsigned long size, +			  uint32_t flags, drm_ttm_object_t ** ttm_object) +{ +	drm_ttm_object_t *object; +	drm_map_list_t *list; +	drm_local_map_t *map; +	drm_ttm_t *ttm; + +	object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM); +	if (!object) +		return -ENOMEM; +	object->flags = flags; +	list = &object->map_list; + +	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM); +	if (!list->map) { +		drm_ttm_object_remove(dev, object); +		return -ENOMEM; +	} +	map = list->map; + +	ttm = drm_init_ttm(dev, size); +	if (!ttm) { +		DRM_ERROR("Could not create ttm\n"); +		drm_ttm_object_remove(dev, object); +		return -ENOMEM; +	} + +	map->offset = (unsigned long)ttm; +	map->type = _DRM_TTM; +	map->flags = _DRM_REMOVABLE; +	map->size = ttm->num_pages * PAGE_SIZE; +	map->handle = (void *)object; + +	list->file_offset_node = drm_mm_search_free(&dev->offset_manager, +						    ttm->num_pages, 0, 0); +	if (!list->file_offset_node) { +		drm_ttm_object_remove(dev, object); +		return -ENOMEM; +	} +	list->file_offset_node = drm_mm_get_block(list->file_offset_node, +						  ttm->num_pages, 0); + +	list->hash.key = list->file_offset_node->start; + +	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) { +		drm_ttm_object_remove(dev, object); +		return -ENOMEM; +	} + +	list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; +	ttm->mapping_offset = list->hash.key; +	atomic_set(&object->usage, 1); +	*ttm_object = object; +	return 0; +} diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h new file mode 100644 index 00000000..11a13754 --- /dev/null +++ b/linux-core/drm_ttm.h @@ -0,0 +1,145 @@ +/************************************************************************** + *  + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + *  + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + *  + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + *  + *  + **************************************************************************/ +/* + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + +#ifndef _DRM_TTM_H +#define _DRM_TTM_H +#define DRM_HAS_TTM + +/* + * The backend GART interface. (In our case AGP). Any similar type of device (PCIE?) + * needs only to implement these functions to be usable with the "TTM" interface. + * The AGP backend implementation lives in drm_agpsupport.c  + * basically maps these calls to available functions in agpgart. 
Each drm device driver gets an + * additional function pointer that creates these types,  + * so that the device can choose the correct aperture. + * (Multiple AGP apertures, etc.)  + * Most device drivers will let this point to the standard AGP implementation. + */ + +#define DRM_BE_FLAG_NEEDS_FREE     0x00000001 +#define DRM_BE_FLAG_BOUND_CACHED   0x00000002 +#define DRM_BE_FLAG_CBA            0x00000004 + +typedef struct drm_ttm_backend { +	unsigned long aperture_base; +	void *private; +	uint32_t flags; +	uint32_t drm_map_type; +	int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend); +	int (*populate) (struct drm_ttm_backend * backend, +			 unsigned long num_pages, struct page ** pages); +	void (*clear) (struct drm_ttm_backend * backend); +	int (*bind) (struct drm_ttm_backend * backend, +		     unsigned long offset, int cached); +	int (*unbind) (struct drm_ttm_backend * backend); +	void (*destroy) (struct drm_ttm_backend * backend); +} drm_ttm_backend_t; + +typedef struct drm_ttm { +	struct page **pages; +	uint32_t page_flags; +	unsigned long num_pages; +	unsigned long aper_offset; +	atomic_t vma_count; +	struct drm_device *dev; +	int destroy; +	uint32_t mapping_offset; +	drm_ttm_backend_t *be; +	enum { +		ttm_bound, +		ttm_evicted, +		ttm_unbound, +		ttm_unpopulated, +	} state; +#ifdef DRM_ODD_MM_COMPAT +	struct list_head vma_list; +	struct list_head p_mm_list; +#endif + +} drm_ttm_t; + +typedef struct drm_ttm_object { +	atomic_t usage; +	uint32_t flags; +	drm_map_list_t map_list; +} drm_ttm_object_t; + +extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size, +				 uint32_t flags, +				 drm_ttm_object_t ** ttm_object); +extern void drm_ttm_object_deref_locked(struct drm_device *dev, +					drm_ttm_object_t * to); +extern void drm_ttm_object_deref_unlocked(struct drm_device *dev, +					  drm_ttm_object_t * to); +extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv, +					       uint32_t handle, +					       int check_owner); +extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset); + +extern int drm_unbind_ttm(drm_ttm_t * ttm); + +/* + * Evict a ttm region. Keeps Aperture caching policy. + */ + +extern int drm_evict_ttm(drm_ttm_t * ttm); +extern void drm_fixup_ttm_caching(drm_ttm_t * ttm); + +/* + * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,  + * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called + * when the last vma exits. + */ + +extern int drm_destroy_ttm(drm_ttm_t * ttm); +extern int drm_ttm_ioctl(DRM_IOCTL_ARGS); + +static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to) +{ +	return (drm_ttm_t *) to->map_list.map->offset; +} + +#define DRM_MASK_VAL(dest, mask, val)			\ +  (dest) = ((dest) & ~(mask)) | ((val) & (mask)); + +#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1) +#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS) + +/* + * Page flags. 
+ */ + +#define DRM_TTM_PAGE_UNCACHED 0x01 +#define DRM_TTM_PAGE_USED     0x02 +#define DRM_TTM_PAGE_BOUND    0x04 +#define DRM_TTM_PAGE_PRESENT  0x08 + +#endif diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index adff7d1a..ba4b1451 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -34,12 +34,42 @@   */  #include "drmP.h" +  #if defined(__ia64__)  #include <linux/efi.h>  #endif  static void drm_vm_open(struct vm_area_struct *vma);  static void drm_vm_close(struct vm_area_struct *vma); +static void drm_vm_ttm_close(struct vm_area_struct *vma); +static int drm_vm_ttm_open(struct vm_area_struct *vma); +static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma); + + +pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) +{ +	pgprot_t tmp = vm_get_page_prot(vma->vm_flags); + +#if defined(__i386__) || defined(__x86_64__) +	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { +		pgprot_val(tmp) |= _PAGE_PCD; +		pgprot_val(tmp) &= ~_PAGE_PWT; +	} +#elif defined(__powerpc__) +	pgprot_val(tmp) |= _PAGE_NO_CACHE; +	if (map->type == _DRM_REGISTERS) +		pgprot_val(tmp) |= _PAGE_GUARDED; +#endif +#if defined(__ia64__) +	if (efi_range_is_wc(vma->vm_start, vma->vm_end - +				    vma->vm_start)) +		tmp = pgprot_writecombine(tmp); +	else +		tmp = pgprot_noncached(tmp); +#endif +	return tmp; +} +  /**   * \c nopage method for AGP virtual memory. @@ -70,7 +100,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,  	if (!dev->agp || !dev->agp->cant_use_aperture)  		goto vm_nopage_error; -	if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash)) +	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))  		goto vm_nopage_error;  	r_list = drm_hash_entry(hash, drm_map_list_t, hash); @@ -129,6 +159,93 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,  }  #endif				/* __OS_HAS_AGP */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) +static +#endif +struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,  +			      struct fault_data *data) +{ +	unsigned long address = data->address; +	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; +	unsigned long page_offset; +	struct page *page; +	drm_ttm_t *ttm;  +	drm_buffer_manager_t *bm; +	drm_device_t *dev; +	unsigned long pfn; +	int err; +	pgprot_t pgprot; + +	if (!map) { +		data->type = VM_FAULT_OOM; +		return NULL; +	} + +	if (address > vma->vm_end) { +		data->type = VM_FAULT_SIGBUS; +		return NULL; +	} + +	ttm = (drm_ttm_t *) map->offset; +	 +	dev = ttm->dev; + +	/* +	 * Perhaps retry here? +	 */ + +	mutex_lock(&dev->struct_mutex); +	drm_fixup_ttm_caching(ttm); + +	bm = &dev->bm; +	page_offset = (address - vma->vm_start) >> PAGE_SHIFT; +	page = ttm->pages[page_offset]; + +	if (!page) { +		if (drm_alloc_memctl(PAGE_SIZE)) { +			data->type = VM_FAULT_OOM; +			goto out; +		} +		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0); +		if (!page) { +			drm_free_memctl(PAGE_SIZE); +			data->type = VM_FAULT_OOM; +			goto out; +		} +		++bm->cur_pages; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) +		SetPageLocked(page); +#else +		SetPageReserved(page); +#endif +	} + +	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) { + +		/* +		 * FIXME: Check can't map aperture flag. 
+		 */ + +		pfn = ttm->aper_offset + page_offset +  +			(ttm->be->aperture_base >> PAGE_SHIFT); +		pgprot = drm_io_prot(ttm->be->drm_map_type, vma); +	} else { +		pfn = page_to_pfn(page); +		pgprot = vma->vm_page_prot; +	} +	 +	err = vm_insert_pfn(vma, address, pfn, pgprot); + +	if (!err || err == -EBUSY)  +		data->type = VM_FAULT_MINOR;  +	else +		data->type = VM_FAULT_OOM; + out: +	mutex_unlock(&dev->struct_mutex); +	return NULL; +} + +  /**   * \c nopage method for shared virtual memory.   * @@ -198,7 +315,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)  			} else {  				dev->vmalist = pt->next;  			} -			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); +			drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);  		} else {  			prev = pt;  		} @@ -243,6 +360,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)  				dmah.size = map->size;  				__drm_pci_free(dev, &dmah);  				break; +		        case _DRM_TTM: +				BUG_ON(1); +				break;  			}  			drm_free(map, sizeof(*map), DRM_MEM_MAPS);  		} @@ -358,6 +478,7 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,  	return drm_do_vm_sg_nopage(vma, address);  } +  #else				/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */  static struct page *drm_vm_nopage(struct vm_area_struct *vma, @@ -414,6 +535,20 @@ static struct vm_operations_struct drm_vm_sg_ops = {  	.close = drm_vm_close,  }; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) +static struct vm_operations_struct drm_vm_ttm_ops = { +	.nopage = drm_vm_ttm_nopage, +	.open = drm_vm_ttm_open_wrapper, +	.close = drm_vm_ttm_close, +}; +#else +static struct vm_operations_struct drm_vm_ttm_ops = { +	.fault = drm_vm_ttm_fault, +	.open = drm_vm_ttm_open_wrapper, +	.close = drm_vm_ttm_close, +}; +#endif +  /**   * \c open method for shared virtual memory.   * @@ -432,7 +567,7 @@ static void drm_vm_open(struct vm_area_struct *vma)  		  vma->vm_start, vma->vm_end - vma->vm_start);  	atomic_inc(&dev->vma_count); -	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS); +	vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);  	if (vma_entry) {  		mutex_lock(&dev->struct_mutex);  		vma_entry->vma = vma; @@ -443,6 +578,29 @@ static void drm_vm_open(struct vm_area_struct *vma)  	}  } +static int drm_vm_ttm_open(struct vm_area_struct *vma) { +   +	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data; +	drm_ttm_t *ttm; +	drm_file_t *priv = vma->vm_file->private_data; +	drm_device_t *dev = priv->head->dev; + +	drm_vm_open(vma); +	mutex_lock(&dev->struct_mutex); +	ttm = (drm_ttm_t *) map->offset; +	atomic_inc(&ttm->vma_count); +#ifdef DRM_ODD_MM_COMPAT +	drm_ttm_add_vma(ttm, vma); +#endif +	mutex_unlock(&dev->struct_mutex); +	return 0; +} + +static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)  +{ +	drm_vm_ttm_open(vma); +} +  /**   * \c close method for all virtual memory types.   
* @@ -469,13 +627,42 @@ static void drm_vm_close(struct vm_area_struct *vma)  			} else {  				dev->vmalist = pt->next;  			} -			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS); +			drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);  			break;  		}  	}  	mutex_unlock(&dev->struct_mutex);  } + +static void drm_vm_ttm_close(struct vm_area_struct *vma) +{ +	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;  +	drm_ttm_t *ttm;  +        drm_device_t *dev; +	int ret; + +	drm_vm_close(vma);  +	if (map) { +		ttm = (drm_ttm_t *) map->offset; +		dev = ttm->dev; +		mutex_lock(&dev->struct_mutex); +#ifdef DRM_ODD_MM_COMPAT +		drm_ttm_delete_vma(ttm, vma); +#endif +		if (atomic_dec_and_test(&ttm->vma_count)) { +			if (ttm->destroy) { +				ret = drm_destroy_ttm(ttm); +				BUG_ON(ret); +				drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM); +			} +		} +		mutex_unlock(&dev->struct_mutex); +	} +	return; +} + +  /**   * mmap DMA memory.   * @@ -496,8 +683,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)  	lock_kernel();  	dev = priv->head->dev;  	dma = dev->dma; -	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", -		  vma->vm_start, vma->vm_end, VM_OFFSET(vma)); +	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", +		  vma->vm_start, vma->vm_end, vma->vm_pgoff);  	/* Length must match exact page count */  	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) { @@ -572,8 +759,8 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)  	unsigned long offset = 0;  	drm_hash_item_t *hash; -	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n", -		  vma->vm_start, vma->vm_end, VM_OFFSET(vma)); +	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", +		  vma->vm_start, vma->vm_end, vma->vm_pgoff);  	if (!priv->authenticated)  		return -EACCES; @@ -582,7 +769,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)  	 * the AGP mapped at physical address 0  	 * --BenH.  	 */ -	if (!VM_OFFSET(vma) +	if (!vma->vm_pgoff  #if __OS_HAS_AGP  	    && (!dev->agp  		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE) @@ -590,7 +777,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)  	    )  		return drm_mmap_dma(filp, vma); -	if (drm_ht_find_item(&dev->map_hash, VM_OFFSET(vma), &hash)) { +	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff , &hash)) {  		DRM_ERROR("Could not find map\n");  		return -EINVAL;  	} @@ -636,27 +823,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)  		/* fall through to _DRM_FRAME_BUFFER... 
*/  	case _DRM_FRAME_BUFFER:  	case _DRM_REGISTERS: -#if defined(__i386__) || defined(__x86_64__) -		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) { -			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; -			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT; -		} -#elif defined(__powerpc__) -		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; -		if (map->type == _DRM_REGISTERS) -			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED; -#endif -		vma->vm_flags |= VM_IO;	/* not in core dump */ -#if defined(__ia64__) -		if (efi_range_is_wc(vma->vm_start, vma->vm_end - -				    vma->vm_start)) -			vma->vm_page_prot = -				pgprot_writecombine(vma->vm_page_prot); -		else -			vma->vm_page_prot = -				pgprot_noncached(vma->vm_page_prot); -#endif  		offset = dev->driver->get_reg_ofs(dev); +		vma->vm_flags |= VM_IO;	/* not in core dump */ +		vma->vm_page_prot = drm_io_prot(map->type, vma);  #ifdef __sparc__  		if (io_remap_pfn_range(vma, vma->vm_start,  					(map->offset + offset) >>PAGE_SHIFT, @@ -703,6 +872,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)  		vma->vm_flags |= VM_RESERVED;  #endif  		break; +	case _DRM_TTM: { +		vma->vm_ops = &drm_vm_ttm_ops; +		vma->vm_private_data = (void *) map; +		vma->vm_file = filp; +		vma->vm_flags |= VM_RESERVED | VM_IO; +#ifdef DRM_ODD_MM_COMPAT +		mutex_lock(&dev->struct_mutex); +		drm_ttm_map_bound(vma); +		mutex_unlock(&dev->struct_mutex); +#endif		 +		if (drm_vm_ttm_open(vma)) +		        return -EAGAIN; +		return 0; +	}  	default:  		return -EINVAL;	/* This should never happen. */  	} diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c new file mode 100644 index 00000000..c3e54468 --- /dev/null +++ b/linux-core/i915_buffer.c @@ -0,0 +1,66 @@ +/************************************************************************** + *  + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + *  + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + *  + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ *  + *  + **************************************************************************/ +/* + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + +#include "drmP.h" +#include "i915_drm.h" +#include "i915_drv.h" + + +drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev) +{ +	return drm_agp_init_ttm(dev, NULL); +} + +int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type) +{ +	*class = 0; +	if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) +		*type = 3; +	else +		*type = 1; +	return 0; +} + +int i915_invalidate_caches(drm_device_t * dev, uint32_t flags) +{ +	/* +	 * FIXME: Only emit once per batchbuffer submission. +	 */ + +	uint32_t flush_cmd = MI_NO_WRITE_FLUSH; + +	if (flags & DRM_BO_FLAG_READ) +		flush_cmd |= MI_READ_FLUSH; +	if (flags & DRM_BO_FLAG_EXE) +		flush_cmd |= MI_EXE_FLUSH; + +	return i915_emit_mi_flush(dev, flush_cmd); +} diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 209500be..2c5b43d0 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -38,6 +38,27 @@ static struct pci_device_id pciidlist[] = {  	i915_PCI_IDS  }; +#ifdef I915_HAVE_FENCE +static drm_fence_driver_t i915_fence_driver = { +	.no_types = 2, +	.wrap_diff = (1 << 30), +	.flush_diff = (1 << 29), +	.sequence_mask = 0xffffffffU, +	.lazy_capable = 1, +	.emit = i915_fence_emit_sequence, +	.poke_flush = i915_poke_flush, +}; +#endif +#ifdef I915_HAVE_BUFFER +static drm_bo_driver_t i915_bo_driver = { +        .iomap = {NULL, NULL}, +	.cached = {1, 1}, +	.create_ttm_backend_entry = i915_create_ttm_backend_entry, +	.fence_type = i915_fence_types, +	.invalidate_caches = i915_invalidate_caches +}; +#endif +  static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);  static struct drm_driver driver = {  	/* don't use mtrr's here, the Xserver or user space app should @@ -79,7 +100,12 @@ static struct drm_driver driver = {  		.probe = probe,  		.remove = __devexit_p(drm_cleanup_pci),  		}, - +#ifdef I915_HAVE_FENCE +	.fence_driver = &i915_fence_driver, +#endif +#ifdef I915_HAVE_BUFFER +	.bo_driver = &i915_bo_driver, +#endif  	.name = DRIVER_NAME,  	.desc = DRIVER_DESC,  	.date = DRIVER_DATE, diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c new file mode 100644 index 00000000..2182604c --- /dev/null +++ b/linux-core/i915_fence.c @@ -0,0 +1,146 @@ +/************************************************************************** + *  + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + *  + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + *  + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + *  + *  + **************************************************************************/ +/* + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" + +/* + * Implements an intel sync flush operation. + */ + +static void i915_perform_flush(drm_device_t * dev) +{ +	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; +	drm_fence_manager_t *fm = &dev->fm; +	drm_fence_driver_t *driver = dev->driver->fence_driver; +	uint32_t flush_flags = 0; +	uint32_t flush_sequence = 0; +	uint32_t i_status; +	uint32_t diff; +	uint32_t sequence; + +	if (!dev_priv) +		return; + +	if (fm->pending_exe_flush) { +		sequence = READ_BREADCRUMB(dev_priv); + +		/* +		 * First update fences with the current breadcrumb. +		 */ + +		diff = sequence - fm->last_exe_flush; +		if (diff < driver->wrap_diff && diff != 0) { +			drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE); +		} + +		diff = sequence - fm->exe_flush_sequence; +		if (diff < driver->wrap_diff) { +			fm->pending_exe_flush = 0; +			if (dev_priv->fence_irq_on) { +				i915_user_irq_off(dev_priv); +				dev_priv->fence_irq_on = 0; +			} +		} else if (!dev_priv->fence_irq_on) { +			i915_user_irq_on(dev_priv); +			dev_priv->fence_irq_on = 1; +		} +	} + +	if (dev_priv->flush_pending) { +		i_status = READ_HWSP(dev_priv, 0); +		if ((i_status & (1 << 12)) != +		    (dev_priv->saved_flush_status & (1 << 12))) { +			flush_flags = dev_priv->flush_flags; +			flush_sequence = dev_priv->flush_sequence; +			dev_priv->flush_pending = 0; +			drm_fence_handler(dev, flush_sequence, flush_flags); +		} +	} + +	if (fm->pending_flush && !dev_priv->flush_pending) { +		dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv); +		dev_priv->flush_flags = fm->pending_flush; +		dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0); +		I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21)); +		dev_priv->flush_pending = 1; +		fm->pending_flush = 0; +	} + +	if (dev_priv->flush_pending) { +		i_status = READ_HWSP(dev_priv, 0); +		if ((i_status & (1 << 12)) != +		    (dev_priv->saved_flush_status & (1 << 12))) { +			flush_flags = dev_priv->flush_flags; +			flush_sequence = dev_priv->flush_sequence; +			dev_priv->flush_pending = 0; +			drm_fence_handler(dev, flush_sequence, flush_flags); +		} +	} + +} + +void i915_poke_flush(drm_device_t * dev) +{ +	drm_fence_manager_t *fm = &dev->fm; +	unsigned long flags; + +	write_lock_irqsave(&fm->lock, flags); +	i915_perform_flush(dev); +	write_unlock_irqrestore(&fm->lock, flags); +} + +int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags, +			     uint32_t * sequence, uint32_t * native_type) +{ +	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; +	if (!dev_priv) +		return -EINVAL; + +	i915_emit_irq(dev); +	*sequence = (uint32_t) dev_priv->counter; +	*native_type = DRM_FENCE_TYPE_EXE; +	if (flags & DRM_I915_FENCE_FLAG_FLUSHED) +		*native_type |= DRM_I915_FENCE_TYPE_RW; + +	return 0; +} + +void i915_fence_handler(drm_device_t * dev) +{ +	drm_fence_manager_t *fm = &dev->fm; + +	write_lock(&fm->lock); +	i915_perform_flush(dev); +	write_unlock(&fm->lock); +} | 
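The breadcrumb handling in i915_perform_flush() above relies on unsigned wraparound arithmetic: a fence sequence counts as reached once the hardware breadcrumb is within wrap_diff (1 << 30 in i915_fence_driver) of it, so the comparison keeps working when the 32-bit counter rolls over. A minimal standalone sketch of that comparison follows; the seq_passed() helper and the WRAP_DIFF constant are illustrative names chosen here, not symbols from the patch.

/*
 * Wraparound-safe sequence comparison, modelled on the
 * "diff = sequence - fm->exe_flush_sequence; if (diff < driver->wrap_diff)"
 * test in i915_perform_flush().  Illustrative sketch only.
 */
#include <stdint.h>
#include <stdio.h>

#define WRAP_DIFF (1U << 30)	/* mirrors i915_fence_driver.wrap_diff */

/* Returns nonzero when the breadcrumb has advanced to or past the
 * fence sequence, modulo 2^32. */
static int seq_passed(uint32_t breadcrumb, uint32_t fence_seq)
{
	return (uint32_t)(breadcrumb - fence_seq) < WRAP_DIFF;
}

int main(void)
{
	printf("%d\n", seq_passed(100, 90));		/* 1: already passed */
	printf("%d\n", seq_passed(90, 100));		/* 0: still pending */
	printf("%d\n", seq_passed(5, 0xfffffff0U));	/* 1: passed across wraparound */
	return 0;
}

The test stays unambiguous only while every outstanding fence lies within WRAP_DIFF of the current breadcrumb; keeping wrap_diff at 1 << 30, well under half the 32-bit range, leaves that margin.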
