 linux-core/i915_gem.c  | 186
 shared-core/i915_dma.c |   4
 shared-core/i915_drv.h |  30
 shared-core/i915_irq.c |  23
 4 files changed, 188 insertions(+), 55 deletions(-)
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index 641ca8a3..c67ce309 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -124,7 +124,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
  * Returned sequence numbers are nonzero on success.
  */
 static uint32_t
-i915_add_request(struct drm_device *dev)
+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *request;
@@ -155,12 +155,74 @@ i915_add_request(struct drm_device *dev)
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
+	request->flush_domains = flush_domains;
 	list_add_tail(&request->list, &dev_priv->mm.request_list);
 
 	return seqno;
 }
 
 /**
+ * Moves buffers associated only with the given active seqno from the active
+ * to inactive list, potentially freeing them.
+ */
+static void
+i915_gem_retire_request(struct drm_device *dev,
+			struct drm_i915_gem_request *request)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (request->flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+
+		/* First clear any buffers that were only waiting for a flush
+		 * matching the one just retired.
+		 */
+
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			if (obj->write_domain & request->flush_domains) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_inactive(obj);
+			}
+		}
+
+	}
+
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate.
+	 */
+	while (!list_empty(&dev_priv->mm.active_list)) {
+		struct drm_gem_object *obj;
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = list_first_entry(&dev_priv->mm.active_list,
+					    struct drm_i915_gem_object,
+					    list);
+		obj = obj_priv->obj;
+
+		/* If the seqno being retired doesn't match the oldest in the
+		 * list, then the oldest in the list must still be newer than
+		 * this seqno.
+		 */
+		if (obj_priv->last_rendering_seqno != request->seqno)
+			return;
+#if WATCH_LRU
+		DRM_INFO("%s: retire %d moves to inactive list %p\n",
+			 __func__, seqno, obj);
+#endif
+
+		if (obj->write_domain != 0) {
+			list_move_tail(&obj_priv->list,
+				       &dev_priv->mm.flushing_list);
+		} else {
+			i915_gem_object_move_to_inactive(obj);
+		}
+	}
+}
+
+/**
  * Returns true if seq1 is later than seq2.
  */
 static int
@@ -198,6 +260,8 @@ i915_gem_retire_requests(struct drm_device *dev)
 		retiring_seqno = request->seqno;
 
 		if (i915_seqno_passed(seqno, retiring_seqno)) {
+			i915_gem_retire_request(dev, request);
+
 			list_del(&request->list);
 			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
 		} else
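
The request-retirement machinery above keys off i915_seqno_passed() ("Returns true if seq1 is later than seq2"), whose body lies outside this hunk. As a standalone sketch, this is the usual wraparound-safe comparison such a helper performs; the helper name and test values are illustrative, not taken from the driver:

#include <assert.h>
#include <stdint.h>

/* Unsigned subtraction reinterpreted as signed stays correct across
 * 32-bit wraparound, as long as the two sequence numbers are within
 * 2^31 of each other.
 */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));           /* plain ordering */
	assert(!seqno_passed(3, 5));
	assert(seqno_passed(2, 0xfffffffeu)); /* holds across the wrap */
	return 0;
}
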
@@ -317,15 +381,17 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	 * create a new seqno to wait for.
 	 */
 	if (obj->write_domain & ~(DRM_GEM_DOMAIN_CPU)) {
+		uint32_t write_domain = obj->write_domain;
 #if WATCH_BUF
 		DRM_INFO("%s: flushing object %p from write domain %08x\n",
-			  __func__, obj, obj->write_domain);
+			  __func__, obj, write_domain);
 #endif
-		i915_gem_flush(dev, 0, obj->write_domain);
+		i915_gem_flush(dev, 0, write_domain);
 		obj->write_domain = 0;
 
 		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = i915_add_request(dev);
+		obj_priv->last_rendering_seqno = i915_add_request(dev,
+								  write_domain);
 		BUG_ON(obj_priv->last_rendering_seqno == 0);
 #if WATCH_LRU
 		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
@@ -343,11 +409,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 		if (ret != 0)
 			return ret;
 
-		i915_gem_object_move_to_inactive(obj);
-
-#if WATCH_LRU
-		DRM_INFO("%s: wait moves to lru list %p\n", __func__, obj);
-#endif
+		BUG_ON(obj_priv->active);
 	}
 
 	return 0;
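
The wait_rendering hunk above is the reason i915_add_request() grew a flush_domains argument: obj->write_domain is cleared as soon as the flush is emitted, so the value must be captured first and stored on the request for i915_gem_retire_request() to match against later. A toy userspace model of that ordering, with invented domain bits and structures standing in for the driver's:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_DOMAIN_RENDER (1u << 0)	/* stand-in for a DRM_GEM_DOMAIN_* bit */

struct toy_request {
	uint32_t seqno;
	uint32_t flush_domains;	/* domains flushed when this was emitted */
};

struct toy_object {
	uint32_t write_domain;
	uint32_t last_rendering_seqno;
};

static uint32_t next_seqno = 1;

/* Mirrors the order in the patch: capture write_domain, clear it on the
 * object, then hand the captured value to the new request.
 */
static struct toy_request flush_object(struct toy_object *obj)
{
	uint32_t write_domain = obj->write_domain;	/* capture before clearing */
	struct toy_request req;

	obj->write_domain = 0;
	obj->last_rendering_seqno = next_seqno;

	req.seqno = next_seqno++;
	req.flush_domains = write_domain;
	return req;
}

int main(void)
{
	struct toy_object obj = { .write_domain = TOY_DOMAIN_RENDER };
	struct toy_request req = flush_object(&obj);

	printf("request %" PRIu32 " flushed domains 0x%" PRIx32 "\n",
	       req.seqno, req.flush_domains);
	return 0;
}
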
@@ -471,54 +533,73 @@ i915_gem_evict_something(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	int ret;
 
-	/* Find the LRU buffer. */
-	if (!list_empty(&dev_priv->mm.inactive_list)) {
-		obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
-					    struct drm_i915_gem_object,
-					    list);
-	} else if (!list_empty(&dev_priv->mm.active_list)) {
-		int found = 0;
+	for (;;) {
+		/* If there's an inactive buffer available now, grab it
+		 * and be done.
+		 */
+		if (!list_empty(&dev_priv->mm.inactive_list)) {
+			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
+						    struct drm_i915_gem_object,
+						    list);
+			obj = obj_priv->obj;
+			BUG_ON(obj_priv->pin_count != 0);
+			break;
+		}
 
-		/* If there's nothing unused and ready, grab the first
-		 * unpinned object from the currently executing list.
+		/* If we didn't get anything, but the ring is still processing
+		 * things, wait for one of those things to finish and hopefully
+		 * leave us a buffer to evict.
 		 */
-		list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-				    list) {
-			if (obj_priv->pin_count == 0) {
-				found = 1;
-				break;
-			}
+		if (!list_empty(&dev_priv->mm.request_list)) {
+			struct drm_i915_gem_request *request;
+			int ret;
+
+			request = list_first_entry(&dev_priv->mm.request_list,
+						   struct drm_i915_gem_request,
+						   list);
+
+			ret = i915_wait_request(dev, request->seqno);
+			if (ret != 0)
+				return ret;
+
+			continue;
 		}
-		if (!found)
-			return -ENOMEM;
-	} else {
+
+		/* If we didn't have anything on the request list but there
+		 * are buffers awaiting a flush, emit one and try again.
+		 * When we wait on it, those buffers waiting for that flush
+		 * will get moved to inactive.
+		 */
+		if (!list_empty(&dev_priv->mm.flushing_list)) {
+			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+						    struct drm_i915_gem_object,
+						    list);
+			obj = obj_priv->obj;
+
+			i915_gem_flush(dev,
+				       obj->write_domain,
+				       obj->write_domain);
+			i915_add_request(dev, obj->write_domain);
+
+			obj = NULL;
+			continue;
+		}
+
+		/* If we didn't do any of the above, there's nothing to be done
+		 * and we just can't fit it in.
+		 */
 		return -ENOMEM;
 	}
-	obj = obj_priv->obj;
-	drm_gem_object_reference(obj);
+
 #if WATCH_LRU
 	DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
-	/* Only unpinned buffers should be on this list. */
-	BUG_ON(obj_priv->pin_count != 0);
-
-	/* Do this separately from the wait_rendering in
-	 * i915_gem_object_unbind() because we want to catch interrupts and
-	 * return.
-	 */
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret != 0)
-		return ret;
+	BUG_ON(obj_priv->active);
 
 	/* Wait on the rendering and unbind the buffer. */
 	i915_gem_object_unbind(obj);
-#if WATCH_LRU
-	DRM_INFO("%s: evicted %p\n", __func__, obj);
-#endif
-	drm_gem_object_unreference(obj);
 
 	return 0;
 }
@@ -713,12 +794,15 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
 
 /**
  * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations
+ * perform the necessary flush and invalidate operations.
+ *
+ * Returns the write domains flushed, for use in flush tracking.
  */
-
-static void
+static uint32_t
 i915_gem_dev_set_domain(struct drm_device *dev)
 {
+	uint32_t flush_domains = dev->flush_domains;
+
 	/*
 	 * Now that all the buffers are synced to the proper domains,
 	 * flush and invalidate the collected domains
@@ -736,6 +820,8 @@ i915_gem_dev_set_domain(struct drm_device *dev)
 		dev->invalidate_domains = 0;
 		dev->flush_domains = 0;
 	}
+
+	return flush_domains;
 }
 
 static int
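
i915_gem_evict_something() above becomes a retry loop with three escalating fallbacks. The following compressed, runnable skeleton shows just that control flow, with counters standing in for the real lists and requests (all names invented for the example):

#include <errno.h>
#include <stdio.h>

/* Stub state standing in for the inactive list, request queue, and
 * flushing list.
 */
static int inactive_count, request_count, flushing_count;

static int wait_oldest_request(void)
{
	request_count--;	/* pretend the wait retired one request... */
	inactive_count++;	/* ...which freed up a buffer */
	return 0;
}

static void flush_and_add_request(void)
{
	flushing_count--;	/* the flush will move this buffer... */
	request_count++;	/* ...once the new request retires */
}

/* Same shape as the patched function: inactive buffer -> done; else wait
 * on the oldest request; else emit a flush for the flushing list and
 * retry; else give up with -ENOMEM.
 */
static int evict_something(void)
{
	for (;;) {
		if (inactive_count > 0) {
			inactive_count--;	/* evict it */
			return 0;
		}
		if (request_count > 0) {
			int ret = wait_oldest_request();
			if (ret != 0)
				return ret;
			continue;
		}
		if (flushing_count > 0) {
			flush_and_add_request();
			continue;
		}
		return -ENOMEM;
	}
}

int main(void)
{
	flushing_count = 1;	/* nothing inactive or in flight yet */
	printf("evict: %d\n", evict_something());	/* succeeds after flush+wait */
	printf("evict: %d\n", evict_something());	/* now truly empty: -ENOMEM */
	return 0;
}
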
@@ -1014,7 +1100,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	struct drm_gem_object *batch_obj;
 	int ret, i;
 	uint64_t exec_offset;
-	uint32_t seqno;
+	uint32_t seqno, flush_domains;
 
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1099,7 +1185,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Flush/invalidate caches and chipset buffer */
-	i915_gem_dev_set_domain(dev);
+	flush_domains = i915_gem_dev_set_domain(dev);
 
 	exec_offset = validate_list[args->buffer_count - 1].offset;
@@ -1124,7 +1210,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	 * *some* interrupts representing completion of buffers that we can
 	 * wait on when trying to clear up gtt space).
 	 */
-	seqno = i915_add_request(dev);
+	seqno = i915_add_request(dev, flush_domains);
 	BUG_ON(seqno == 0);
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 30ba8c65..fa73cd29 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -1043,6 +1043,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	memset(dev_priv, 0, sizeof(drm_i915_private_t));
 
 	dev->dev_private = (void *)dev_priv;
+	dev_priv->dev = dev;
 
 	/* Add register map (needed for suspend/resume) */
 	base = drm_get_resource_start(dev, mmio_bar);
@@ -1052,8 +1053,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		_DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
+	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
+	INIT_WORK(&dev_priv->user_interrupt_task,
+		  i915_user_interrupt_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
 #ifdef __linux__
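
The INIT_WORK() call above wires the deferred task up at load time. For reference, a minimal self-contained module using the same workqueue pattern, including the container_of() step from the work_struct back to the enclosing private structure; every name here is invented for the example:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

struct toy_private {
	int pending;			/* state for the handler to consume */
	struct work_struct task;
};

static struct toy_private toy;

/* Runs in process context, so it may sleep and take mutexes -- exactly
 * what an interrupt handler cannot do, and why the work is deferred.
 */
static void toy_handler(struct work_struct *work)
{
	struct toy_private *priv = container_of(work, struct toy_private, task);

	pr_info("toy: handling, pending=%d\n", priv->pending);
}

static int __init toy_init(void)
{
	toy.pending = 1;
	INIT_WORK(&toy.task, toy_handler);
	schedule_work(&toy.task);	/* what the IRQ handler would do */
	return 0;
}

static void __exit toy_exit(void)
{
	cancel_work_sync(&toy.task);	/* don't unload with work in flight */
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");
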
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index e3f280d5..029c39a0 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -101,6 +101,8 @@ typedef struct _drm_i915_vbl_swap {
 } drm_i915_vbl_swap_t;
 
 typedef struct drm_i915_private {
+	struct drm_device *dev;
+
 	drm_local_map_t *sarea;
 	drm_local_map_t *mmio_map;
@@ -244,6 +246,7 @@ typedef struct drm_i915_private {
 
 	struct {
 		struct drm_memrange gtt_space;
+
 		/**
 		 * List of objects currently involved in rendering from the
 		 * ringbuffer.
@@ -251,10 +254,20 @@ typedef struct drm_i915_private {
 		 * A reference is held on the buffer while on this list.
 		 */
 		struct list_head active_list;
+
 		/**
-		 * LRU List of non-executing objects still in the GTT.
-		 * There may still be dirty cachelines that need to be flushed
-		 * before unbind.
+		 * List of objects which are not in the ringbuffer but which
+		 * still have a write_domain which needs to be flushed before
+		 * unbinding.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head flushing_list;
+
+		/**
+		 * LRU list of objects which are not in the ringbuffer and
+		 * are ready to unbind, but are still in the GTT.
+		 *
 		 * A reference is not held on the buffer while on this list,
 		 * as merely being GTT-bound shouldn't prevent its being
 		 * freed, and we'll pull it off the list in the free path.
@@ -269,6 +282,8 @@ typedef struct drm_i915_private {
 
 		uint32_t next_gem_seqno;
 	} mm;
+
+	struct work_struct user_interrupt_task;
 } drm_i915_private_t;
 
 enum intel_chip_family {
@@ -285,11 +300,11 @@ struct drm_i915_gem_object {
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_memrange_node *gtt_space;
 
-	/** This object's place on the active or inactive lists */
+	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
 
 	/**
-	 * This is set if the object is on the active list
+	 * This is set if the object is on the active or flushing lists
 	 * (has pending rendering), and is not set if it's on inactive (ready
 	 * to be unbound).
 	 */
@@ -334,6 +349,9 @@ struct drm_i915_gem_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
+	/** Cache domains that were flushed at the start of the request. */
+	uint32_t flush_domains;
+
 	struct list_head list;
 };
 
@@ -385,6 +403,7 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
 extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
 extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
+extern void i915_user_interrupt_handler(struct work_struct *work);
 
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -438,6 +457,7 @@ int i915_gem_set_domain(struct drm_gem_object *obj,
 int i915_gem_flush_pwrite(struct drm_gem_object *obj,
 			  uint64_t offset, uint64_t size);
 void i915_gem_lastclose(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
 #endif
 
 #ifdef __linux__
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index b17e2408..02191316 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -436,6 +436,28 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
 	return count;
 }
 
+/**
+ * Handler for user interrupts in process context (able to sleep, do VFS
+ * operations, etc.).
+ *
+ * If another IRQ comes in while we're in this handler, it will still get put
+ * on the queue again to be rerun when we finish.
+ */
+void
+i915_user_interrupt_handler(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv;
+	struct drm_device *dev;
+
+	dev_priv = container_of(work, drm_i915_private_t,
+				user_interrupt_task);
+	dev = dev_priv->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_retire_requests(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -493,6 +515,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		DRM_WAKEUP(&dev_priv->irq_queue);
 #ifdef I915_HAVE_FENCE
 		i915_fence_handler(dev);
+		schedule_work(&dev_priv->user_interrupt_task);
 #endif
 	}
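
The handler at the end takes dev->struct_mutex before touching the GEM lists, which is the whole point of bouncing from the IRQ to a work item: request retirement has to run in a context that can sleep on that mutex. A loose userspace analogy using pthreads, with the "IRQ" side reduced to setting a completed seqno before the worker runs (all names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;
static int completed_seqno;	/* what the "IRQ" side observed */
static int retired_seqno;	/* how far the worker has retired */

/* Process-context half: everything touching shared bookkeeping happens
 * under struct_mutex, which an interrupt handler could never block on.
 */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&struct_mutex);
	while (retired_seqno < completed_seqno)
		retired_seqno++;	/* stand-in for retiring one request */
	pthread_mutex_unlock(&struct_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t;

	completed_seqno = 3;			/* "IRQ" saw seqno 3 complete */
	pthread_create(&t, NULL, worker, NULL);	/* the schedule_work() analogue */
	pthread_join(t, NULL);
	printf("retired up to %d\n", retired_seqno);
	return 0;
}
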
