-rw-r--r--  libdrm/xf86drm.c             |   6
-rw-r--r--  libdrm/xf86mm.h              |   2
-rw-r--r--  linux-core/drm_agpsupport.c  |  16
-rw-r--r--  linux-core/drm_bo.c          | 270
-rw-r--r--  linux-core/drm_bo_move.c     |  18
-rw-r--r--  linux-core/drm_objects.h     | 127
-rw-r--r--  linux-core/drm_ttm.c         | 139
-rw-r--r--  linux-core/drm_vm.c          |   4
-rw-r--r--  linux-core/i915_buffer.c     |  15
-rw-r--r--  linux-core/i915_drv.c        |   4
-rw-r--r--  linux-core/i915_ioc32.c      |  63
-rw-r--r--  linux-core/nouveau_buffer.c  |  15
-rw-r--r--  linux-core/nouveau_sgdma.c   |   9
-rw-r--r--  linux-core/via_buffer.c      |   2
-rw-r--r--  shared-core/drm.h            |  51
-rw-r--r--  shared-core/i915_dma.c       |  91
-rw-r--r--  shared-core/i915_drv.h       |   9
-rw-r--r--  shared-core/i915_irq.c       |   2
-rw-r--r--  shared-core/mga_dma.c        |   2
-rw-r--r--  shared-core/nv20_graph.c     |  13
-rw-r--r--  shared-core/via_drv.c        |   2
-rw-r--r--  shared-core/via_drv.h        |   2
22 files changed, 658 insertions(+), 204 deletions(-)
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 4265c320..e3550de7 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2578,7 +2578,7 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)      buf->size = rep->size;      buf->offset = rep->offset;      buf->mapHandle = rep->arg_handle; -    buf->mask = rep->mask; +    buf->proposedFlags = rep->proposed_flags;      buf->start = rep->buffer_start;      buf->fenceFlags = rep->fence_flags;      buf->replyFlags = rep->rep_flags; @@ -2592,7 +2592,7 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)  int drmBOCreate(int fd, unsigned long size,  		unsigned pageAlignment, void *user_buffer, -		uint64_t mask, +		uint64_t flags,  		unsigned hint, drmBO *buf)  {      struct drm_bo_create_arg arg; @@ -2602,7 +2602,7 @@ int drmBOCreate(int fd, unsigned long size,      memset(buf, 0, sizeof(*buf));      memset(&arg, 0, sizeof(arg)); -    req->mask = mask; +    req->flags = flags;      req->hint = hint;      req->size = size;      req->page_alignment = pageAlignment; diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index d3df8497..c80288a7 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -110,7 +110,7 @@ typedef struct _drmBO      unsigned handle;      uint64_t mapHandle;      uint64_t flags; -    uint64_t mask; +    uint64_t proposedFlags;      unsigned mapFlags;      unsigned long size;      unsigned long offset; diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index e8bfaea4..02187017 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -505,12 +505,14 @@ static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)  static int drm_agp_populate(struct drm_ttm_backend *backend, -			    unsigned long num_pages, struct page **pages) +			    unsigned long num_pages, struct page **pages, +			    struct page *dummy_read_page)  {  	struct drm_agp_ttm_backend *agp_be =  		container_of(backend, struct drm_agp_ttm_backend, backend);  	struct page **cur_page, **last_page = pages + num_pages;  	DRM_AGP_MEM *mem; +	int dummy_page_count = 0;  	if (drm_alloc_memctl(num_pages * sizeof(void *)))  		return -1; @@ -528,8 +530,16 @@ static int drm_agp_populate(struct drm_ttm_backend *backend,  	DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);  	mem->page_count = 0; -	for (cur_page = pages; cur_page < last_page; ++cur_page) -		mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page)); +	for (cur_page = pages; cur_page < last_page; ++cur_page) { +		struct page *page = *cur_page; +		if (!page) { +			page = dummy_read_page; +			++dummy_page_count; +		} +		mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page)); +	} +	if (dummy_page_count) +		DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);  	agp_be->mem = mem;  	return 0;  } diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 2b8ef1bf..df10e12b 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -80,7 +80,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object *bo)  	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); -	if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) +	if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))  	    || bo->mem.mem_type != bo->pinned_mem_type) {  		man = &bo->dev->bm.man[bo->mem.mem_type];  		list_add_tail(&bo->lru, &man->lru); @@ -137,27 +137,32 @@ static int drm_bo_add_ttm(struct drm_buffer_object *bo)  {  	struct drm_device *dev = bo->dev;  	int ret = 0; +	uint32_t page_flags 
= 0;  	DRM_ASSERT_LOCKED(&bo->mutex);  	bo->ttm = NULL; +	if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE) +		page_flags |= DRM_TTM_PAGE_WRITE; +  	switch (bo->type) { -	case drm_bo_type_dc: +	case drm_bo_type_device:  	case drm_bo_type_kernel: -		bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); +		bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,  +					 page_flags, dev->bm.dummy_read_page);  		if (!bo->ttm)  			ret = -ENOMEM;  		break;  	case drm_bo_type_user: -		bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); +		bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, +					 page_flags | DRM_TTM_PAGE_USER, +					 dev->bm.dummy_read_page);  		if (!bo->ttm)  			ret = -ENOMEM;  		ret = drm_ttm_set_user(bo->ttm, current, -				       bo->mem.mask & DRM_BO_FLAG_WRITE,  				       bo->buffer_start, -				       bo->num_pages, -				       dev->bm.dummy_read_page); +				       bo->num_pages);  		if (ret)  			return ret; @@ -199,7 +204,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,  			goto out_err;  		if (mem->mem_type != DRM_BO_MEM_LOCAL) { -			ret = drm_bind_ttm(bo->ttm, mem); +			ret = drm_ttm_bind(bo->ttm, mem);  			if (ret)  				goto out_err;  		} @@ -209,11 +214,11 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,  		struct drm_bo_mem_reg *old_mem = &bo->mem;  		uint64_t save_flags = old_mem->flags; -		uint64_t save_mask = old_mem->mask; +		uint64_t save_proposed_flags = old_mem->proposed_flags;  		*old_mem = *mem;  		mem->mm_node = NULL; -		old_mem->mask = save_mask; +		old_mem->proposed_flags = save_proposed_flags;  		DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);  	} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && @@ -262,7 +267,7 @@ out_err:  	new_man = &bm->man[bo->mem.mem_type];  	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {  		drm_ttm_unbind(bo->ttm); -		drm_destroy_ttm(bo->ttm); +		drm_ttm_destroy(bo->ttm);  		bo->ttm = NULL;  	} @@ -419,7 +424,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object *bo)  		if (bo->ttm) {  			drm_ttm_unbind(bo->ttm); -			drm_destroy_ttm(bo->ttm); +			drm_ttm_destroy(bo->ttm);  			bo->ttm = NULL;  		} @@ -703,7 +708,7 @@ static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,  	evict_mem.mm_node = NULL;  	evict_mem = bo->mem; -	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo); +	evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);  	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);  	if (ret) { @@ -867,7 +872,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,  		type_ok = drm_bo_mt_compatible(man,  					       bo->type == drm_bo_type_user, -					       mem_type, mem->mask, +					       mem_type, mem->proposed_flags,  					       &cur_flags);  		if (!type_ok) @@ -919,13 +924,13 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,  		if (!drm_bo_mt_compatible(man,  					  bo->type == drm_bo_type_user,  					  mem_type, -					  mem->mask, +					  mem->proposed_flags,  					  &cur_flags))  			continue;  		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); -		if (ret == 0) { +		if (ret == 0 && mem->mm_node) {  			mem->flags = cur_flags;  			return 0;  		} @@ -939,11 +944,25 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,  }  EXPORT_SYMBOL(drm_bo_mem_space); -static int drm_bo_new_mask(struct drm_buffer_object *bo, -			   uint64_t new_flags, uint64_t used_mask) +/* + * drm_bo_modify_proposed_flags: + * + * @bo: the buffer object getting new flags + * + * @new_flags: the new set of proposed flag
bits + * + * @new_mask: the mask of bits changed in new_flags + * + * Modify the proposed_flag bits in @bo + */ +static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo, +					 uint64_t new_flags, uint64_t new_mask)  { -	uint32_t new_props; +	uint32_t new_access; +	/* Copy unchanging bits from existing proposed_flags */ +	DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask); +	   	if (bo->type == drm_bo_type_user &&  	    ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=  	     (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) { @@ -951,7 +970,7 @@ static int drm_bo_new_mask(struct drm_buffer_object *bo,  		return -EINVAL;  	} -	if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { +	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {  		DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");  		return -EPERM;  	} @@ -961,15 +980,15 @@ static int drm_bo_new_mask(struct drm_buffer_object *bo,  		return -EPERM;  	} -	new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | -				 DRM_BO_FLAG_READ); +	new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | +				  DRM_BO_FLAG_READ); -	if (!new_props) { +	if (new_access == 0) {  		DRM_ERROR("Invalid buffer object rwx properties\n");  		return -EINVAL;  	} -	bo->mem.mask = new_flags; +	bo->mem.proposed_flags = new_flags;  	return 0;  } @@ -1104,8 +1123,8 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,  	ret = 0;  	mutex_unlock(&bo->mutex); -	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ, -		    !drm_bo_check_unfenced(bo)); +	DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ, +		     !drm_bo_check_unfenced(bo));  	mutex_lock(&bo->mutex);  	if (ret == -EINTR)  		return -EAGAIN; @@ -1136,12 +1155,17 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,  	rep->size = bo->num_pages * PAGE_SIZE;  	rep->offset = bo->offset; -	if (bo->type == drm_bo_type_dc) +	/* +	 * drm_bo_type_device buffers have user-visible +	 * handles which can be used to share across +	 * processes. Hand that back to the application +	 */ +	if (bo->type == drm_bo_type_device)  		rep->arg_handle = bo->map_list.user_token;  	else  		rep->arg_handle = 0; -	rep->mask = bo->mem.mask; +	rep->proposed_flags = bo->mem.proposed_flags;  	rep->buffer_start = bo->buffer_start;  	rep->fence_flags = bo->fence_type;  	rep->rep_flags = 0; @@ -1287,7 +1311,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,  /*   * bo->mutex locked. - * Note that new_mem_flags are NOT transferred to the bo->mem.mask. + * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.   
*/  int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags, @@ -1313,7 +1337,7 @@ int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,  	mem.num_pages = bo->num_pages;  	mem.size = mem.num_pages << PAGE_SHIFT; -	mem.mask = new_mem_flags; +	mem.proposed_flags = new_mem_flags;  	mem.page_alignment = bo->mem.page_alignment;  	mutex_lock(&bm->evict_mutex); @@ -1356,24 +1380,41 @@ out_unlock:  static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)  { -	uint32_t flag_diff = (mem->mask ^ mem->flags); +	uint32_t flag_diff = (mem->proposed_flags ^ mem->flags); -	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) +	if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)  		return 0;  	if ((flag_diff & DRM_BO_FLAG_CACHED) && -	    (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/ -	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) +	    (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/ +	     (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))  		return 0;  	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && -	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) || -	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))) +	    ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) || +	     (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))  		return 0;  	return 1;  } -/* - * bo locked. +/** + * drm_buffer_object_validate: + * + * @bo: the buffer object to modify + * + * @fence_class: the new fence class covering this buffer + * + * @move_unfenced: a boolean indicating whether switching the + * memory space of this buffer should cause the buffer to + * be placed on the unfenced list. + * + * @no_wait: whether this function should return -EBUSY instead + * of waiting. + * + * Change buffer access parameters. This can involve moving + * the buffer to the correct memory type, pinning the buffer + * or changing the class/type of fence covering this buffer + * + * Must be called with bo locked.   */  static int drm_buffer_object_validate(struct drm_buffer_object *bo, @@ -1386,8 +1427,8 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,  	uint32_t ftype;  	int ret; -	DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n", -		  (unsigned long long) bo->mem.mask, +	DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n", +		  (unsigned long long) bo->mem.proposed_flags,  		  (unsigned long long) bo->mem.flags);  	ret = driver->fence_type(bo, &fence_class, &ftype); @@ -1428,7 +1469,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,  	 */  	if (!drm_bo_mem_compat(&bo->mem)) { -		ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait, +		ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,  					 move_unfenced);  		if (ret) {  			if (ret != -EAGAIN) @@ -1441,7 +1482,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,  	 * Pinned buffers.  	 
*/ -	if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { +	if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {  		bo->pinned_mem_type = bo->mem.mem_type;  		mutex_lock(&dev->struct_mutex);  		list_del_init(&bo->pinned_lru); @@ -1477,7 +1518,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,  		if (ret)  			return ret;  	} -	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE); +	/* +	 * Validation has succeeded, move the access and other +	 * non-mapping-related flag bits from the proposed flags to +	 * the active flags +	 */ + +	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);  	/*  	 * Finally, adjust lru to be sure. @@ -1502,13 +1549,38 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,  	return 0;  } +/** + * drm_bo_do_validate: + * + * @bo:	the buffer object + * + * @flags: access rights, mapping parameters and cacheability. See + * the DRM_BO_FLAG_* values in drm.h + * + * @mask: Which flag values to change; this allows callers to modify + * things without knowing the current state of other flags. + * + * @hint: changes the procedure for this operation, see the DRM_BO_HINT_* + * values in drm.h. + * + * @fence_class: a driver-specific way of doing fences. Presumably, + * this would be used if the driver had more than one submission and + * fencing mechanism. At this point, there isn't any use of this + * from the user mode code. + * + * @rep: To be stuffed with the reply from validation + *  + * 'validate' a buffer object. This changes where the buffer is + * located, along with changing access modes. + */ +  int drm_bo_do_validate(struct drm_buffer_object *bo,  		       uint64_t flags, uint64_t mask, uint32_t hint,  		       uint32_t fence_class, -		       int no_wait,  		       struct drm_bo_info_rep *rep)  {  	int ret; +	int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;  	mutex_lock(&bo->mutex);  	ret = drm_bo_wait_unfenced(bo, no_wait, 0); @@ -1516,9 +1588,7 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,  	if (ret)  		goto out; - -	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); -	ret = drm_bo_new_mask(bo, flags, mask); +	ret = drm_bo_modify_proposed_flags (bo, flags, mask);  	if (ret)  		goto out; @@ -1535,11 +1605,42 @@ out:  }  EXPORT_SYMBOL(drm_bo_do_validate); +/** + * drm_bo_handle_validate + * + * @file_priv: the drm file private, used to get a handle to the user context + * + * @handle: the buffer object handle + * + * @flags: access rights, mapping parameters and cacheability. See + * the DRM_BO_FLAG_* values in drm.h + * + * @mask: Which flag values to change; this allows callers to modify + * things without knowing the current state of other flags. + * + * @hint: changes the procedure for this operation, see the DRM_BO_HINT_* + * values in drm.h. + * + * @fence_class: a driver-specific way of doing fences. Presumably, + * this would be used if the driver had more than one submission and + * fencing mechanism. At this point, there isn't any use of this + * from the user mode code. + * + * @use_old_fence_class: don't change fence class, pull it from the buffer object + * + * @rep: To be stuffed with the reply from validation + *  + * @bo_rep: To be stuffed with the buffer object pointer + * + * Perform drm_bo_do_validate on a buffer referenced by a user-space handle. + * Some permissions checking is done on the parameters, otherwise this + * is a thin wrapper.
+ */  int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, -			   uint32_t fence_class,  			   uint64_t flags, uint64_t mask,  			   uint32_t hint, +			   uint32_t fence_class,  			   int use_old_fence_class,  			   struct drm_bo_info_rep *rep,  			   struct drm_buffer_object **bo_rep) @@ -1547,7 +1648,6 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,  	struct drm_device *dev = file_priv->head->dev;  	struct drm_buffer_object *bo;  	int ret; -	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;  	mutex_lock(&dev->struct_mutex);  	bo = drm_lookup_buffer_object(file_priv, handle, 1); @@ -1567,8 +1667,7 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,  		mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); -	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, -				 no_wait, rep); +	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);  	if (!ret && bo_rep)  		*bo_rep = bo; @@ -1636,7 +1735,7 @@ out:  int drm_buffer_object_create(struct drm_device *dev,  			     unsigned long size,  			     enum drm_bo_type type, -			     uint64_t mask, +			     uint64_t flags,  			     uint32_t hint,  			     uint32_t page_alignment,  			     unsigned long buffer_start, @@ -1681,16 +1780,23 @@ int drm_buffer_object_create(struct drm_device *dev,  	bo->mem.page_alignment = page_alignment;  	bo->buffer_start = buffer_start & PAGE_MASK;  	bo->priv_flags = 0; -	bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | -		DRM_BO_FLAG_MAPPABLE; -	bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | -		DRM_BO_FLAG_MAPPABLE; +	bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | +			 DRM_BO_FLAG_MAPPABLE); +	bo->mem.proposed_flags = 0;  	atomic_inc(&bm->count); -	ret = drm_bo_new_mask(bo, mask, mask); +	/* +	 * Use drm_bo_modify_proposed_flags to error-check the proposed flags +	 */ +	ret = drm_bo_modify_proposed_flags (bo, flags, flags);  	if (ret)  		goto out_err; -	if (bo->type == drm_bo_type_dc) { +	/* +	 * For drm_bo_type_device buffers, allocate +	 * address space from the device so that applications +	 * can mmap the buffer from there +	 */ +	if (bo->type == drm_bo_type_device) {  		mutex_lock(&dev->struct_mutex);  		ret = drm_bo_setup_vm_locked(bo);  		mutex_unlock(&dev->struct_mutex); @@ -1753,20 +1859,28 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil  		return -EINVAL;  	} -	bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc; +	/* +	 * If the buffer creation request comes in with a starting address, +	 * that points at the desired user pages to map. Otherwise, create +	 * a drm_bo_type_device buffer, which uses pages allocated from the kernel +	 */ +	bo_type = (req->buffer_start) ? 
drm_bo_type_user : drm_bo_type_device; +	/* +	 * User buffers cannot be shared +	 */  	if (bo_type == drm_bo_type_user) -		req->mask &= ~DRM_BO_FLAG_SHAREABLE; +		req->flags &= ~DRM_BO_FLAG_SHAREABLE;  	ret = drm_buffer_object_create(file_priv->head->dev, -				       req->size, bo_type, req->mask, +				       req->size, bo_type, req->flags,  				       req->hint, req->page_alignment,  				       req->buffer_start, &entry);  	if (ret)  		goto out;  	ret = drm_bo_add_user_object(file_priv, entry, -				     req->mask & DRM_BO_FLAG_SHAREABLE); +				     req->flags & DRM_BO_FLAG_SHAREABLE);  	if (ret) {  		drm_bo_usage_deref_unlocked(&entry);  		goto out; @@ -1797,11 +1911,17 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev,  	if (ret)  		return ret; -	ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class, +	/* +	 * validate the buffer. note that 'fence_class' will be unused +	 * as we pass use_old_fence_class=1 here. Note also that +	 * the libdrm API doesn't pass fence_class to the kernel, +	 * so it's a good thing it isn't used here. +	 */ +	ret = drm_bo_handle_validate(file_priv, req->handle,  				     req->flags,  				     req->mask,  				     req->hint | DRM_BO_HINT_DONT_FENCE, -				     1, +				     req->fence_class, 1,  				     rep, NULL);  	(void) drm_bo_read_unlock(&dev->bm.bm_lock); @@ -1952,7 +2072,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo,  		DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "  			  "cleanup. Removing flag and evicting.\n");  		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; -		bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; +		bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;  	}  	if (bo->mem.mem_type == mem_type) @@ -2502,6 +2622,14 @@ void drm_bo_unmap_virtual(struct drm_buffer_object *bo)  	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);  } +/** + * drm_bo_takedown_vm_locked: + * + * @bo: the buffer object to remove any drm device mapping + * + * Remove any associated vm mapping on the drm device node that + * would have been created for a drm_bo_type_device buffer + */  static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)  {  	struct drm_map_list *list; @@ -2509,7 +2637,7 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)  	struct drm_device *dev = bo->dev;  	DRM_ASSERT_LOCKED(&dev->struct_mutex); -	if (bo->type != drm_bo_type_dc) +	if (bo->type != drm_bo_type_device)  		return;  	list = &bo->map_list; @@ -2532,6 +2660,16 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)  	drm_bo_usage_deref_locked(&bo);  } +/** + * drm_bo_setup_vm_locked: + * + * @bo: the buffer to allocate address space for + * + * Allocate address space in the drm device so that applications + * can mmap the buffer and access the contents. This only + * applies to drm_bo_type_device objects as others are not + * placed in the drm device address space. 
+ */  static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)  {  	struct drm_map_list *list = &bo->map_list; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 971b4af8..b06a09f0 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -54,7 +54,7 @@ int drm_bo_move_ttm(struct drm_buffer_object *bo,  	struct drm_ttm *ttm = bo->ttm;  	struct drm_bo_mem_reg *old_mem = &bo->mem;  	uint64_t save_flags = old_mem->flags; -	uint64_t save_mask = old_mem->mask; +	uint64_t save_proposed_flags = old_mem->proposed_flags;  	int ret;  	if (old_mem->mem_type == DRM_BO_MEM_TT) { @@ -71,14 +71,14 @@ int drm_bo_move_ttm(struct drm_buffer_object *bo,  		save_flags = old_mem->flags;  	}  	if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { -		ret = drm_bind_ttm(ttm, new_mem); +		ret = drm_ttm_bind(ttm, new_mem);  		if (ret)  			return ret;  	}  	*old_mem = *new_mem;  	new_mem->mm_node = NULL; -	old_mem->mask = save_mask; +	old_mem->proposed_flags = save_proposed_flags;  	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);  	return 0;  } @@ -210,7 +210,7 @@ int drm_bo_move_memcpy(struct drm_buffer_object *bo,  	void *new_iomap;  	int ret;  	uint64_t save_flags = old_mem->flags; -	uint64_t save_mask = old_mem->mask; +	uint64_t save_proposed_flags = old_mem->proposed_flags;  	unsigned long i;  	unsigned long page;  	unsigned long add = 0; @@ -255,12 +255,12 @@ out2:  	*old_mem = *new_mem;  	new_mem->mm_node = NULL; -	old_mem->mask = save_mask; +	old_mem->proposed_flags = save_proposed_flags;  	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);  	if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {  		drm_ttm_unbind(ttm); -		drm_destroy_ttm(ttm); +		drm_ttm_destroy(ttm);  		bo->ttm = NULL;  	} @@ -330,7 +330,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,  	struct drm_bo_mem_reg *old_mem = &bo->mem;  	int ret;  	uint64_t save_flags = old_mem->flags; -	uint64_t save_mask = old_mem->mask; +	uint64_t save_proposed_flags = old_mem->proposed_flags;  	struct drm_buffer_object *old_obj;  	if (bo->fence) @@ -365,7 +365,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,  		if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {  			drm_ttm_unbind(bo->ttm); -			drm_destroy_ttm(bo->ttm); +			drm_ttm_destroy(bo->ttm);  			bo->ttm = NULL;  		}  	} else { @@ -399,7 +399,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,  	*old_mem = *new_mem;  	new_mem->mm_node = NULL; -	old_mem->mask = save_mask; +	old_mem->proposed_flags = save_proposed_flags;  	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);  	return 0;  } diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 1dc61fde..a2d10b5d 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -263,7 +263,8 @@ struct drm_ttm_backend;  struct drm_ttm_backend_func {  	int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);  	int (*populate) (struct drm_ttm_backend *backend, -			 unsigned long num_pages, struct page **pages); +			 unsigned long num_pages, struct page **pages, +			 struct page *dummy_read_page);  	void (*clear) (struct drm_ttm_backend *backend);  	int (*bind) (struct drm_ttm_backend *backend,  		     struct drm_bo_mem_reg *bo_mem); @@ -297,8 +298,10 @@ struct drm_ttm {  }; -extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem); +extern struct drm_ttm *drm_ttm_create(struct 
drm_device *dev, unsigned long size, +				      uint32_t page_flags, +				      struct page *dummy_read_page); +extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);  extern void drm_ttm_unbind(struct drm_ttm *ttm);  extern void drm_ttm_evict(struct drm_ttm *ttm);  extern void drm_ttm_fixup_caching(struct drm_ttm *ttm); @@ -307,10 +310,8 @@ extern void drm_ttm_cache_flush(void);  extern int drm_ttm_populate(struct drm_ttm *ttm);  extern int drm_ttm_set_user(struct drm_ttm *ttm,  			    struct task_struct *tsk, -			    int write,  			    unsigned long start, -			    unsigned long num_pages, -			    struct page *dummy_read_page); +			    unsigned long num_pages);  /*   * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do @@ -318,7 +319,7 @@ extern int drm_ttm_set_user(struct drm_ttm *ttm,   * Otherwise it is called when the last vma exits.   */ -extern int drm_destroy_ttm(struct drm_ttm *ttm); +extern int drm_ttm_destroy(struct drm_ttm *ttm);  #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -331,14 +332,47 @@ extern int drm_destroy_ttm(struct drm_ttm *ttm);   * Page flags.   */ +/* + * This ttm should not be cached by the CPU + */  #define DRM_TTM_PAGE_UNCACHED   (1 << 0) +/* + * This flag is not used at this time; I don't know what the + * intent was + */  #define DRM_TTM_PAGE_USED       (1 << 1) +/* + * This flag is not used at this time; I don't know what the + * intent was + */  #define DRM_TTM_PAGE_BOUND      (1 << 2) +/* + * This flag is not used at this time; I don't know what the + * intent was + */  #define DRM_TTM_PAGE_PRESENT    (1 << 3) +/* + * The array of page pointers was allocated with vmalloc + * instead of drm_calloc. + */  #define DRM_TTM_PAGE_VMALLOC    (1 << 4) +/* + * This ttm is mapped from user space + */  #define DRM_TTM_PAGE_USER       (1 << 5) -#define DRM_TTM_PAGE_USER_WRITE (1 << 6) +/* + * This ttm will be written to by the GPU + */ +#define DRM_TTM_PAGE_WRITE	(1 << 6) +/* + * This ttm was mapped to the GPU, and so the contents may have + * been modified + */  #define DRM_TTM_PAGE_USER_DIRTY (1 << 7) +/* + * This flag is not used at this time; I don't know what the + * intent was. + */  #define DRM_TTM_PAGE_USER_DMA   (1 << 8)  /*************************************************** @@ -351,16 +385,50 @@ struct drm_bo_mem_reg {  	unsigned long num_pages;  	uint32_t page_alignment;  	uint32_t mem_type; +	/* +	 * Current buffer status flags, indicating +	 * where the buffer is located and which +	 * access modes are in effect +	 */  	uint64_t flags; -	uint64_t mask; +	/** +	 * These are the flags proposed for +	 * a validate operation. If the +	 * validate succeeds, they'll get moved +	 * into the flags field +	 */ +	uint64_t proposed_flags; +	  	uint32_t desired_tile_stride;  	uint32_t hw_tile_stride;  };  enum drm_bo_type { -	drm_bo_type_dc, +	/* +	 * drm_bo_type_device are 'normal' drm allocations, +	 * pages are allocated from within the kernel automatically +	 * and the objects can be mmap'd from the drm device. Each +	 * drm_bo_type_device object has a unique name which can be +	 * used by other processes to share access to the underlying +	 * buffer. +	 */ +	drm_bo_type_device, +	/* +	 * drm_bo_type_user are buffers of pages that already exist +	 * in the process address space.
They are more limited than +	 * drm_bo_type_device buffers in that they must always +	 * remain cached (as we assume the user pages are mapped cached), +	 * and they are not sharable to other processes through DRM +	 * (although, regular shared memory should still work fine). +	 */  	drm_bo_type_user, -	drm_bo_type_kernel, /* for initial kernel allocations */ +	/* +	 * drm_bo_type_kernel are buffers that exist solely for use +	 * within the kernel. The pages cannot be mapped into the +	 * process. One obvious use would be for the ring +	 * buffer where user access would not (ideally) be required. +	 */ +	drm_bo_type_kernel,  };  struct drm_buffer_object { @@ -477,9 +545,36 @@ struct drm_bo_driver {  	int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);  	int (*init_mem_type) (struct drm_device *dev, uint32_t type,  			      struct drm_mem_type_manager *man); -	 uint32_t(*evict_mask) (struct drm_buffer_object *bo); +	/* +	 * evict_flags: +	 * +	 * @bo: the buffer object to be evicted +	 * +	 * Return the bo flags for a buffer which is not mapped to the hardware. +	 * These will be placed in proposed_flags so that when the move is +	 * finished, they'll end up in bo->mem.flags +	 */ +	uint64_t(*evict_flags) (struct drm_buffer_object *bo); +	/* +	 * move: +	 * +	 * @bo: the buffer to move +	 * +	 * @evict: whether this motion is evicting the buffer from +	 * the graphics address space +	 * +	 * @no_wait: whether this should give up and return -EBUSY +	 * if this move would require sleeping +	 * +	 * @new_mem: the new memory region receiving the buffer +	 * +	 * Move a buffer between two memory regions. +	 */  	int (*move) (struct drm_buffer_object *bo,  		     int evict, int no_wait, struct drm_bo_mem_reg *new_mem); +	/* +	 * ttm_cache_flush +	 */  	void (*ttm_cache_flush)(struct drm_ttm *ttm);  }; @@ -520,7 +615,7 @@ extern int drm_fence_buffer_objects(struct drm_device *dev,  				    struct drm_fence_object **used_fence);  extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);  extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, -				    enum drm_bo_type type, uint64_t mask, +				    enum drm_bo_type type, uint64_t flags,  				    uint32_t hint, uint32_t page_alignment,  				    unsigned long buffer_start,  				    struct drm_buffer_object **bo); @@ -535,9 +630,8 @@ extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);  extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,  			  unsigned long p_offset, unsigned long p_size);  extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, -				  uint32_t fence_class, uint64_t flags, -				  uint64_t mask, uint32_t hint, -				  int use_old_fence_class, +				  uint64_t flags, uint64_t mask, uint32_t hint, +				  uint32_t fence_class, int use_old_fence_class,  				  struct drm_bo_info_rep *rep,  				  struct drm_buffer_object **bo_rep);  extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, @@ -546,7 +640,6 @@ extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_  extern int drm_bo_do_validate(struct drm_buffer_object *bo,  			      uint64_t flags, uint64_t mask, uint32_t hint,  			      uint32_t fence_class, -			      int no_wait,  			      struct drm_bo_info_rep *rep);  /* diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 3540571f..a9d87338 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -46,7 +46,7 @@ EXPORT_SYMBOL(drm_ttm_cache_flush);   * Use kmalloc if possible. 
Otherwise fall back to vmalloc.   */ -static void ttm_alloc_pages(struct drm_ttm *ttm) +static void drm_ttm_alloc_pages(struct drm_ttm *ttm)  {  	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);  	ttm->pages = NULL; @@ -66,7 +66,7 @@ static void ttm_alloc_pages(struct drm_ttm *ttm)  		drm_free_memctl(size);  } -static void ttm_free_pages(struct drm_ttm *ttm) +static void drm_ttm_free_pages(struct drm_ttm *ttm)  {  	unsigned long size = ttm->num_pages * sizeof(*ttm->pages); @@ -103,7 +103,7 @@ static struct page *drm_ttm_alloc_page(void)   * for range of pages in a ttm.   */ -static int drm_set_caching(struct drm_ttm *ttm, int noncached) +static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)  {  	int i;  	struct page **cur_page; @@ -145,7 +145,7 @@ static void drm_ttm_free_user_pages(struct drm_ttm *ttm)  	int i;  	BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER)); -	write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0); +	write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0);  	dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);  	for (i = 0; i < ttm->num_pages; ++i) { @@ -193,7 +193,7 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)   * Free all resources associated with a ttm.   */ -int drm_destroy_ttm(struct drm_ttm *ttm) +int drm_ttm_destroy(struct drm_ttm *ttm)  {  	struct drm_ttm_backend *be; @@ -208,14 +208,14 @@ int drm_destroy_ttm(struct drm_ttm *ttm)  	if (ttm->pages) {  		if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) -			drm_set_caching(ttm, 0); +			drm_ttm_set_caching(ttm, 0);  		if (ttm->page_flags & DRM_TTM_PAGE_USER)  			drm_ttm_free_user_pages(ttm);  		else  			drm_ttm_free_alloced_pages(ttm); -		ttm_free_pages(ttm); +		drm_ttm_free_pages(ttm);  	}  	drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM); @@ -239,23 +239,33 @@ struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)  }  EXPORT_SYMBOL(drm_ttm_get_page); +/** + * drm_ttm_set_user: + * + * @ttm: the ttm to map pages to. This must always be + * a freshly created ttm. + * + * @tsk: a pointer to the address space from which to map + * pages. + *  + * @write: a boolean indicating that write access is desired + * + * start: the starting address + * + * Map a range of user addresses to a new ttm object. This + * provides access to user memory from the graphics device. + */  int drm_ttm_set_user(struct drm_ttm *ttm,  		     struct task_struct *tsk, -		     int write,  		     unsigned long start, -		     unsigned long num_pages, -		     struct page *dummy_read_page) +		     unsigned long num_pages)  {  	struct mm_struct *mm = tsk->mm;  	int ret; -	int i; +	int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;  	BUG_ON(num_pages != ttm->num_pages); - -	ttm->dummy_read_page = dummy_read_page; -	ttm->page_flags |= DRM_TTM_PAGE_USER | -		((write) ? 
DRM_TTM_PAGE_USER_WRITE : 0); - +	BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0);  	down_read(&mm->mmap_sem);  	ret = get_user_pages(tsk, mm, start, num_pages, @@ -267,14 +277,17 @@ int drm_ttm_set_user(struct drm_ttm *ttm,  		return -ENOMEM;  	} -	for (i = 0; i < num_pages; ++i) { -		if (ttm->pages[i] == NULL) -			ttm->pages[i] = ttm->dummy_read_page; -	} -  	return 0;  } +/** + * drm_ttm_populate: + * + * @ttm: the object to allocate pages for + * + * Allocate pages for all unset page entries, then + * call the backend to create the hardware mappings + */  int drm_ttm_populate(struct drm_ttm *ttm)  {  	struct page *page; @@ -285,21 +298,32 @@ int drm_ttm_populate(struct drm_ttm *ttm)  		return 0;  	be = ttm->be; -	for (i = 0; i < ttm->num_pages; ++i) { -		page = drm_ttm_get_page(ttm, i); -		if (!page) -			return -ENOMEM; +	if (ttm->page_flags & DRM_TTM_PAGE_WRITE) { +		for (i = 0; i < ttm->num_pages; ++i) { +			page = drm_ttm_get_page(ttm, i); +			if (!page) +				return -ENOMEM; +		}  	} -	be->func->populate(be, ttm->num_pages, ttm->pages); +	be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);  	ttm->state = ttm_unbound;  	return 0;  } -/* - * Initialize a ttm. +/** + * drm_ttm_create: + * + * @dev: the drm_device + * + * @size: The size (in bytes) of the desired object + * + * @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h. + * + * Allocate and initialize a ttm, leaving it unpopulated at this time   */ -struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size) +struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size, +			       uint32_t page_flags, struct page *dummy_read_page)  {  	struct drm_bo_driver *bo_driver = dev->driver->bo_driver;  	struct drm_ttm *ttm; @@ -317,21 +341,23 @@ struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)  	ttm->destroy = 0;  	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; -	ttm->page_flags = 0; +	ttm->page_flags = page_flags; + +	ttm->dummy_read_page = dummy_read_page;  	/*  	 * Account also for AGP module memory usage.  	 */ -	ttm_alloc_pages(ttm); +	drm_ttm_alloc_pages(ttm);  	if (!ttm->pages) { -		drm_destroy_ttm(ttm); +		drm_ttm_destroy(ttm);  		DRM_ERROR("Failed allocating page table\n");  		return NULL;  	}  	ttm->be = bo_driver->create_ttm_backend_entry(dev);  	if (!ttm->be) { -		drm_destroy_ttm(ttm); +		drm_ttm_destroy(ttm);  		DRM_ERROR("Failed creating ttm backend entry\n");  		return NULL;  	} @@ -339,10 +365,15 @@ struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)  	return ttm;  } -/* - * Unbind a ttm region from the aperture. +/** + * drm_ttm_evict: + * + * @ttm: the object to be unbound from the aperture. + * + * Transition a ttm from bound to evicted, where it + * isn't present in the aperture, but various caches may + * not be consistent.   */ -  void drm_ttm_evict(struct drm_ttm *ttm)  {  	struct drm_ttm_backend *be = ttm->be; @@ -356,17 +387,33 @@ void drm_ttm_evict(struct drm_ttm *ttm)  	ttm->state = ttm_evicted;  } +/** + * drm_ttm_fixup_caching: + * + * @ttm: the object to set unbound + * + * XXX this function is misnamed. Transition a ttm from evicted to + * unbound, flushing caches as appropriate. 
+ */  void drm_ttm_fixup_caching(struct drm_ttm *ttm)  {  	if (ttm->state == ttm_evicted) {  		struct drm_ttm_backend *be = ttm->be;  		if (be->func->needs_ub_cache_adjust(be)) -			drm_set_caching(ttm, 0); +			drm_ttm_set_caching(ttm, 0);  		ttm->state = ttm_unbound;  	}  } +/** + * drm_ttm_unbind: + * + * @ttm: the object to unbind from the graphics device + * + * Unbind an object from the aperture. This removes the mappings + * from the graphics device and flushes caches if necessary. + */  void drm_ttm_unbind(struct drm_ttm *ttm)  {  	if (ttm->state == ttm_bound) @@ -375,7 +422,19 @@ void drm_ttm_unbind(struct drm_ttm *ttm)  	drm_ttm_fixup_caching(ttm);  } -int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem) +/** + * drm_ttm_bind: + * + * @ttm: the ttm object to bind to the graphics device + * + * @bo_mem: the aperture memory region which will hold the object + * + * Bind a ttm object to the aperture. This ensures that the necessary + * pages are allocated, flushes CPU caches as needed and marks the + * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been + * modified by the GPU + */ +int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)  {  	struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;  	int ret = 0; @@ -393,7 +452,7 @@ int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)  		return ret;  	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) -		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); +		drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);  	else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&  		   bo_driver->ttm_cache_flush)  		bo_driver->ttm_cache_flush(ttm); @@ -410,4 +469,4 @@ int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)  		ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;  	return 0;  } -EXPORT_SYMBOL(drm_bind_ttm); +EXPORT_SYMBOL(drm_ttm_bind); diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index f2681cc9..dcefcb34 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -751,10 +751,10 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,  	 */  	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { -		uint32_t new_mask = bo->mem.mask | +		uint32_t new_flags = bo->mem.proposed_flags |  			DRM_BO_FLAG_MAPPABLE |  			DRM_BO_FLAG_FORCE_MAPPABLE; -		err = drm_bo_move_buffer(bo, new_mask, 0, 0); +		err = drm_bo_move_buffer(bo, new_flags, 0, 0);  		if (err) {  			ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;  			goto out_unlock; diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index b000a725..3dd236dd 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -38,11 +38,11 @@ struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)  	return drm_agp_init_ttm(dev);  } -int i915_fence_types(struct drm_buffer_object *bo, +int i915_fence_type(struct drm_buffer_object *bo,  		     uint32_t *fclass,  		     uint32_t *type)  { -	if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) +	if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))  		*type = 3;  	else  		*type = 1; @@ -110,7 +110,16 @@ int i915_init_mem_type(struct drm_device *dev, uint32_t type,  	return 0;  } -uint32_t i915_evict_mask(struct drm_buffer_object *bo) +/* + * i915_evict_flags: + * + * @bo: the buffer object to be evicted + * + * Return the bo flags for a buffer which is not mapped to the hardware. 
+ * These will be placed in proposed_flags so that when the move is + * finished, they'll end up in bo->mem.flags + */ +uint64_t i915_evict_flags(struct drm_buffer_object *bo)  {  	switch (bo->mem.mem_type) {  	case DRM_BO_MEM_LOCAL: diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index d2bcf4b5..d3a4ab35 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -61,10 +61,10 @@ static struct drm_bo_driver i915_bo_driver = {  	.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),  	.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),  	.create_ttm_backend_entry = i915_create_ttm_backend_entry, -	.fence_type = i915_fence_types, +	.fence_type = i915_fence_type,  	.invalidate_caches = i915_invalidate_caches,  	.init_mem_type = i915_init_mem_type, -	.evict_mask = i915_evict_mask, +	.evict_flags = i915_evict_flags,  	.move = i915_move,  	.ttm_cache_flush = i915_flush_ttm,  }; diff --git a/linux-core/i915_ioc32.c b/linux-core/i915_ioc32.c index 11dee035..0b8fff19 100644 --- a/linux-core/i915_ioc32.c +++ b/linux-core/i915_ioc32.c @@ -34,6 +34,7 @@  #include "drmP.h"  #include "drm.h"  #include "i915_drm.h" +#include "i915_drv.h"  typedef struct _drm_i915_batchbuffer32 {  	int start;		/* agp offset */ @@ -182,13 +183,73 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,  			 DRM_IOCTL_I915_ALLOC, (unsigned long) request);  } +typedef struct drm_i915_execbuffer32 { +	uint64_t ops_list; +	uint32_t num_buffers; +	struct _drm_i915_batchbuffer32 batch; +	drm_context_t context;  +	struct drm_fence_arg fence_arg; +} drm_i915_execbuffer32_t; + +static int compat_i915_execbuffer(struct file *file, unsigned int cmd, +			     unsigned long arg) +{ +	drm_i915_execbuffer32_t req32; +	struct drm_i915_execbuffer __user *request; +	int err; + +	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) +		return -EFAULT; + +	request = compat_alloc_user_space(sizeof(*request)); + +	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) +       || __put_user(req32.ops_list, &request->ops_list) +       || __put_user(req32.num_buffers, &request->num_buffers) +       || __put_user(req32.context, &request->context) +       || __copy_to_user(&request->fence_arg, &req32.fence_arg,  +                         sizeof(req32.fence_arg)) +       || __put_user(req32.batch.start, &request->batch.start) +       || __put_user(req32.batch.used, &request->batch.used) +       || __put_user(req32.batch.DR1, &request->batch.DR1) +       || __put_user(req32.batch.DR4, &request->batch.DR4) +       || __put_user(req32.batch.num_cliprects, +                     &request->batch.num_cliprects) +       || __put_user((int __user *)(unsigned long)req32.batch.cliprects, +                     &request->batch.cliprects)) +		return -EFAULT; + +	err = drm_ioctl(file->f_dentry->d_inode, file, +			 DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request); + +	if (err) +		return err; + +	if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle) +	    || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class) +	    || __get_user(req32.fence_arg.type, &request->fence_arg.type) +	    || __get_user(req32.fence_arg.flags, &request->fence_arg.flags) +	    || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled) +	    || __get_user(req32.fence_arg.error, &request->fence_arg.error) +	    || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence)) +		return -EFAULT; + +	if (copy_to_user((void __user *)arg, &req32, sizeof(req32))) +		return -EFAULT; + +	
return 0; +} +  drm_ioctl_compat_t *i915_compat_ioctls[] = {  	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,  	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,  	[DRM_I915_GETPARAM] = compat_i915_getparam,  	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit, -	[DRM_I915_ALLOC] = compat_i915_alloc +	[DRM_I915_ALLOC] = compat_i915_alloc, +#ifdef I915_HAVE_BUFFER +	[DRM_I915_EXECBUFFER] = compat_i915_execbuffer, +#endif  };  /** diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c index 9b252a05..a652bb1d 100644 --- a/linux-core/nouveau_buffer.c +++ b/linux-core/nouveau_buffer.c @@ -56,7 +56,7 @@ nouveau_bo_fence_type(struct drm_buffer_object *bo,  {  	/* When we get called, *fclass is set to the requested fence class */ -	if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) +	if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))  		*type = 3;  	else  		*type = 1; @@ -130,8 +130,8 @@ nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,  	return 0;  } -static uint32_t -nouveau_bo_evict_mask(struct drm_buffer_object *bo) +static uint64_t +nouveau_bo_evict_flags(struct drm_buffer_object *bo)  {  	switch (bo->mem.mem_type) {  	case DRM_BO_MEM_LOCAL: @@ -207,15 +207,16 @@ nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait,          tmp_mem = *new_mem;          tmp_mem.mm_node = NULL; -        tmp_mem.mask = DRM_BO_FLAG_MEM_TT | -                DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING; +        tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT | +				  DRM_BO_FLAG_CACHED | +				  DRM_BO_FLAG_FORCE_CACHING);          ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);          if (ret)                  return ret; -        ret = drm_bind_ttm(bo->ttm, &tmp_mem); +        ret = drm_ttm_bind (bo->ttm, &tmp_mem);          if (ret)                  goto out_cleanup; @@ -291,7 +292,7 @@ struct drm_bo_driver nouveau_bo_driver = {  	.fence_type = nouveau_bo_fence_type,  	.invalidate_caches = nouveau_bo_invalidate_caches,  	.init_mem_type = nouveau_bo_init_mem_type, -	.evict_mask = nouveau_bo_evict_mask, +	.evict_flags = nouveau_bo_evict_flags,  	.move = nouveau_bo_move,  	.ttm_cache_flush= nouveau_bo_flush_ttm  }; diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index f3bf5341..6c61819f 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -25,7 +25,7 @@ nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)  static int  nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, -		       struct page **pages) +		       struct page **pages, struct page *dummy_read_page)  {  	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;  	int p, d, o; @@ -41,8 +41,11 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,  	nvbe->pages_populated = d = 0;  	for (p = 0; p < num_pages; p++) {  		for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) { +			struct page *page = pages[p]; +			if (!page) +				page = dummy_read_page;  			nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev, -							 pages[p], o, +							 page, o,  							 NV_CTXDMA_PAGE_SIZE,  							 PCI_DMA_BIDIRECTIONAL);  			if (pci_dma_mapping_error(nvbe->pagelist[d])) { @@ -299,7 +302,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)  	}  	dev_priv->gart_info.sg_handle = sgreq.handle; -	if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) { +	if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) {  		DRM_ERROR("failed populate: 
%d\n", ret);  		return ret;  	} diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index ea755247..532fae6a 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -144,7 +144,7 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,  	return 0;  } -uint32_t via_evict_mask(struct drm_buffer_object *bo) +uint64_t via_evict_flags(struct drm_buffer_object *bo)  {  	switch (bo->mem.mem_type) {  	case DRM_BO_MEM_LOCAL: diff --git a/shared-core/drm.h b/shared-core/drm.h index ec07b895..6b4125f2 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -662,6 +662,10 @@ struct drm_fence_arg {  #define DRM_BO_FLAG_EXE         (1ULL << 2)  /* + * All of the bits related to access mode + */ +#define DRM_BO_MASK_ACCESS	(DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE) +/*   * Status flags. Can be read to determine the actual state of a buffer.   * Can also be set in the buffer mask before validation.   */ @@ -741,18 +745,50 @@ struct drm_fence_arg {  #define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)  /* We can add more of these now with a 64-bit flag type */ -/* Memory flag mask */ +/* + * This is a mask covering all of the memory type flags; easier to just + * use a single constant than a bunch of | values. It covers + * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4 + */  #define DRM_BO_MASK_MEM         0x00000000FF000000ULL -#define DRM_BO_MASK_MEMTYPE     0x00000000FF0800A0ULL - +/* + * This adds all of the CPU-mapping options in with the memory + * type to label all bits which change how the page gets mapped + */ +#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \ +				 DRM_BO_FLAG_CACHED_MAPPED | \ +				 DRM_BO_FLAG_CACHED | \ +				 DRM_BO_FLAG_MAPPABLE) +				   /* Driver-private flags */  #define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL -/* Don't block on validate and map */ +/* + * Don't block on validate and map. Instead, return EBUSY. + */  #define DRM_BO_HINT_DONT_BLOCK  0x00000002 -/* Don't place this buffer on the unfenced list.*/ +/* + * Don't place this buffer on the unfenced list. This means + * that the buffer will not end up having a fence associated + * with it as a result of this operation + */  #define DRM_BO_HINT_DONT_FENCE  0x00000004 +/* + * Sleep while waiting for the operation to complete. + * Without this flag, the kernel will, instead, spin + * until this operation has completed. I'm not sure + * why you would ever want this, so please always + * provide DRM_BO_HINT_WAIT_LAZY to any operation + * which may block + */  #define DRM_BO_HINT_WAIT_LAZY   0x00000008 +/* + * The client has compute relocations refering to this buffer using the + * offset in the presumed_offset field. 
If that offset ends up matching + * where this buffer lands, the kernel is free to skip executing those + * relocations + */ +#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010  #define DRM_BO_INIT_MAGIC 0xfe769812  #define DRM_BO_INIT_MAJOR 1 @@ -769,10 +805,11 @@ struct drm_bo_info_req {  	unsigned int desired_tile_stride;  	unsigned int tile_info;  	unsigned int pad64; +	uint64_t presumed_offset;  };  struct drm_bo_create_req { -	uint64_t mask; +	uint64_t flags;  	uint64_t size;  	uint64_t buffer_start;  	unsigned int hint; @@ -788,7 +825,7 @@ struct drm_bo_create_req {  struct drm_bo_info_rep {  	uint64_t flags; -	uint64_t mask; +	uint64_t proposed_flags;  	uint64_t size;  	uint64_t offset;  	uint64_t arg_handle; diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 42114beb..df395ba7 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -702,7 +702,14 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,  	return 0;  } +#if DRM_DEBUG_CODE +#define DRM_DEBUG_RELOCATION	(drm_debug != 0) +#else +#define DRM_DEBUG_RELOCATION	0 +#endif +  #ifdef I915_HAVE_BUFFER +  struct i915_relocatee_info {  	struct drm_buffer_object *buf;  	unsigned long offset; @@ -712,15 +719,20 @@ struct i915_relocatee_info {  	int is_iomem;  }; -static void i915_dereference_buffers_locked(struct drm_buffer_object **buffers, +struct drm_i915_validate_buffer { +	struct drm_buffer_object *buffer; +	int presumed_offset_correct; +}; + +static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer *buffers,  					    unsigned num_buffers)  {  	while (num_buffers--) -		drm_bo_usage_deref_locked(&buffers[num_buffers]); +		drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);  }  int i915_apply_reloc(struct drm_file *file_priv, int num_buffers, -		     struct drm_buffer_object **buffers, +		     struct drm_i915_validate_buffer *buffers,  		     struct i915_relocatee_info *relocatee,  		     uint32_t *reloc)  { @@ -734,11 +746,25 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,  		return -EINVAL;  	} +	/* +	 * Short-circuit relocations that were correctly +	 * guessed by the client +	 */ +	if (buffers[reloc[2]].presumed_offset_correct && !DRM_DEBUG_RELOCATION) +		return 0; +  	new_cmd_offset = reloc[0];  	if (!relocatee->data_page ||  	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {  		drm_bo_kunmap(&relocatee->kmap);  		relocatee->offset = new_cmd_offset; +		mutex_lock (&relocatee->buf->mutex); +		ret = drm_bo_wait (relocatee->buf, 0, 0, FALSE); +		mutex_unlock (&relocatee->buf->mutex); +		if (ret) { +			DRM_ERROR("Could not wait for buffer to apply relocs\n %08lx", new_cmd_offset); +			return ret; +		}  		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,  				  1, &relocatee->kmap);  		if (ret) { @@ -751,12 +777,19 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,  		relocatee->page_offset = (relocatee->offset & PAGE_MASK);  	} -	val = buffers[reloc[2]]->offset; +	val = buffers[reloc[2]].buffer->offset;  	index = (reloc[0] - relocatee->page_offset) >> 2;  	/* add in validate */  	val = val + reloc[1]; +	if (DRM_DEBUG_RELOCATION) { +		if (buffers[reloc[2]].presumed_offset_correct && +		    relocatee->data_page[index] != val) { +			DRM_DEBUG ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n", +				   reloc[0], reloc[1], reloc[2], relocatee->data_page[index], val); +		} +	}  	relocatee->data_page[index] = val;  	return 0;  } @@ -765,7 +798,7 @@ int i915_process_relocs(struct drm_file 
*file_priv,  			uint32_t buf_handle,  			uint32_t *reloc_buf_handle,  			struct i915_relocatee_info *relocatee, -			struct drm_buffer_object **buffers, +			struct drm_i915_validate_buffer *buffers,  			uint32_t num_buffers)  {  	struct drm_device *dev = file_priv->head->dev; @@ -851,12 +884,27 @@ out:  static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,  			   drm_handle_t buf_reloc_handle, -			   struct drm_buffer_object **buffers, +			   struct drm_i915_validate_buffer *buffers,  			   uint32_t buf_count)  {  	struct drm_device *dev = file_priv->head->dev;  	struct i915_relocatee_info relocatee;  	int ret = 0; +	int b; + +	/* +	 * Short circuit relocations when all previous +	 * buffers offsets were correctly guessed by +	 * the client +	 */ +	if (!DRM_DEBUG_RELOCATION) { +		for (b = 0; b < buf_count; b++) +			if (!buffers[b].presumed_offset_correct) +				break; +	 +		if (b == buf_count) +			return 0; +	}  	memset(&relocatee, 0, sizeof(relocatee)); @@ -890,7 +938,7 @@ out_err:   */  int i915_validate_buffer_list(struct drm_file *file_priv,  			      unsigned int fence_class, uint64_t data, -			      struct drm_buffer_object **buffers, +			      struct drm_i915_validate_buffer *buffers,  			      uint32_t *num_buffers)  {  	struct drm_i915_op_arg arg; @@ -910,7 +958,8 @@ int i915_validate_buffer_list(struct drm_file *file_priv,  			goto out_err;  		} -		buffers[buf_count] = NULL; +		buffers[buf_count].buffer = NULL; +		buffers[buf_count].presumed_offset_correct = 0;  		if (copy_from_user(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {  			ret = -EFAULT; @@ -920,7 +969,7 @@ int i915_validate_buffer_list(struct drm_file *file_priv,  		if (arg.handled) {  			data = arg.next;  			mutex_lock(&dev->struct_mutex); -			buffers[buf_count] = drm_lookup_buffer_object(file_priv, req->arg_handle, 1); +			buffers[buf_count].buffer = drm_lookup_buffer_object(file_priv, req->arg_handle, 1);  			mutex_unlock(&dev->struct_mutex);  			buf_count++;  			continue; @@ -945,19 +994,25 @@ int i915_validate_buffer_list(struct drm_file *file_priv,  		}  		rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, -						 req->bo_req.fence_class, -						 req->bo_req.flags, -						 req->bo_req.mask, +						 req->bo_req.flags, req->bo_req.mask,  						 req->bo_req.hint, -						 0, +						 req->bo_req.fence_class, 0,  						 &rep.bo_info, -						 &buffers[buf_count]); +						 &buffers[buf_count].buffer);  		if (rep.ret) {  			DRM_ERROR("error on handle validate %d\n", rep.ret);  			goto out_err;  		} - +		/* +		 * If the user provided a presumed offset hint, check whether +		 * the buffer is in the same place, if so, relocations relative to +		 * this buffer need not be performed +		 */ +		if ((req->bo_req.hint & DRM_BO_HINT_PRESUMED_OFFSET) && +		    buffers[buf_count].buffer->offset == req->bo_req.presumed_offset) { +			buffers[buf_count].presumed_offset_correct = 1; +		}  		next = arg.next;  		arg.handled = 1; @@ -991,7 +1046,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data,  	struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;  	int num_buffers;  	int ret; -	struct drm_buffer_object **buffers; +	struct drm_i915_validate_buffer *buffers;  	struct drm_fence_object *fence;  	if (!dev_priv->allow_batchbuffer) { @@ -1026,7 +1081,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data,  	num_buffers = exec_buf->num_buffers; -	buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER); +	buffers = drm_calloc(num_buffers, 
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index cb336659..2bdd4b79 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -57,10 +57,11 @@
  * 1.9: Usable page flipping and triple buffering
  * 1.10: Plane/pipe disentangling
  * 1.11: TTM superioctl
+ * 1.12: TTM relocation optimization
  */
 #define DRIVER_MAJOR		1
 #if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-#define DRIVER_MINOR		11
+#define DRIVER_MINOR		12
 #else
 #define DRIVER_MINOR		6
 #endif
@@ -302,12 +303,12 @@ extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t f
 #ifdef I915_HAVE_BUFFER
 /* i915_buffer.c */
 extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
-extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
-			    uint32_t *type);
+extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass,
+			   uint32_t *type);
 extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
 			       struct drm_mem_type_manager *man);
-extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
+extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
 extern int i915_move(struct drm_buffer_object *bo, int evict,
 		int no_wait, struct drm_bo_mem_reg *new_mem);
 void i915_flush_ttm(struct drm_ttm *ttm);
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index ee7c40b5..7e3d3f3b 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -719,7 +719,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 
 	DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
 
-	list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
+	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
 	dev_priv->swaps_pending++;
 
 	DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
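The i915_irq.c one-liner above is worth a note: casting a struct pointer to struct list_head * is only correct while the list_head happens to be the struct's first member, and breaks silently the moment a field is added in front of it; taking the address of the embedded member works for any layout. A self-contained sketch of the difference, using a hypothetical struct layout rather than the driver's:

struct list_head {
	struct list_head *next, *prev;
};

struct swap_request {
	unsigned int sequence;   /* any member placed before the link... */
	struct list_head head;   /* ...makes the old cast point into 'sequence' */
};

/* Insert 'entry' before 'list', i.e. at the tail of the list. */
static void sketch_list_add_tail(struct list_head *entry, struct list_head *list)
{
	entry->prev = list->prev;
	entry->next = list;
	list->prev->next = entry;
	list->prev = entry;
}

static void queue_swap(struct swap_request *req, struct list_head *pending)
{
	/* Fragile: sketch_list_add_tail((struct list_head *)req, pending); */
	sketch_list_add_tail(&req->head, pending);  /* robust for any layout */
}

This is the same reason kernel code retrieves entries with list_entry()/container_of() instead of casting list pointers back to the containing type.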
diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c
index 67236b2d..00154b23 100644
--- a/shared-core/mga_dma.c
+++ b/shared-core/mga_dma.c
@@ -997,7 +997,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
 		}
 	}
 
-	return 0;
+	return err;
 }
 
 int mga_dma_init(struct drm_device *dev, void *data,
diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c
index 81501ed7..a21fde71 100644
--- a/shared-core/nv20_graph.c
+++ b/shared-core/nv20_graph.c
@@ -432,19 +432,6 @@ static void nv30_31_graph_context_init(struct drm_device *dev,
 	INSTANCE_WR(ctx, 0x385c/4, 0x3f800000);
 	INSTANCE_WR(ctx, 0x3864/4, 0xbf800000);
 	INSTANCE_WR(ctx, 0x386c/4, 0xbf800000);
-
-	/* nv30gl stuff */
-	for (i=0; i<8; i++) {
-		INSTANCE_WR(ctx, (0x4dfc/4)+i, 0x001c527d);
-	}
-	INSTANCE_WR(ctx, 0x4e3c/4, 0x001c527c);
-/* these ones make dma fifo hang
-	INSTANCE_WR(ctx, 0x567c/4, 0x000a0000);
-	INSTANCE_WR(ctx, 0x0878/4, 0x01000000);
-	INSTANCE_WR(ctx, 0x02f4/4, 0x0001ffff);
-
-	INSTANCE_WR(ctx, 0x0028/4, INSTANCE_RD(ctx, 0x0028/4) | 1);
-*/
 }
 
 static void nv34_graph_context_init(struct drm_device *dev,
diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c
index 9f099555..6528a7c1 100644
--- a/shared-core/via_drv.c
+++ b/shared-core/via_drv.c
@@ -74,7 +74,7 @@ static struct drm_bo_driver via_bo_driver = {
 	.fence_type = via_fence_types,
 	.invalidate_caches = via_invalidate_caches,
 	.init_mem_type = via_init_mem_type,
-	.evict_mask = via_evict_mask,
+	.evict_flags = via_evict_flags,
 	.move = NULL,
 };
 #endif
diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h
index d6da8bd9..39aedb1d 100644
--- a/shared-core/via_drv.h
+++ b/shared-core/via_drv.h
@@ -211,7 +211,7 @@ extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
 extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
 			       struct drm_mem_type_manager *man);
-extern uint32_t via_evict_mask(struct drm_buffer_object *bo);
+extern uint64_t via_evict_flags(struct drm_buffer_object *bo);
 extern int via_move(struct drm_buffer_object *bo, int evict,
 		int no_wait, struct drm_bo_mem_reg *new_mem);
 #endif
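The evict_mask -> evict_flags change in the via driver (and in i915_drv.h above) is more than a rename: buffer-object placement flags are carried in a uint64_t, so a driver hook returning uint32_t silently truncates any flag above bit 31. A compilable illustration of the hazard, using a made-up flag value rather than a real DRM_BO_FLAG_* constant:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FLAG_HIGH (1ULL << 40)  /* hypothetical flag above bit 31 */

static uint32_t evict_flags_narrow(void) { return EXAMPLE_FLAG_HIGH; }  /* truncates to 0 */
static uint64_t evict_flags_wide(void)   { return EXAMPLE_FLAG_HIGH; }

int main(void)
{
	printf("narrow: %#llx\n", (unsigned long long)evict_flags_narrow()); /* prints 0 */
	printf("wide:   %#llx\n", (unsigned long long)evict_flags_wide());   /* prints 0x10000000000 */
	return 0;
}

Widening the hook's return type to match the flag word removes the truncation without changing any caller that already stores the result in a 64-bit variable.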
