Diffstat (limited to 'linux-core')
 linux-core/drm_agpsupport.c |  15
 linux-core/drm_bo.c         | 220
 linux-core/drm_bo_move.c    | 196
 linux-core/drm_compat.h     |   5
 linux-core/drm_fence.c      | 183
 linux-core/drm_object.c     |   4
 linux-core/drm_objects.h    | 148
 linux-core/drm_ttm.c        |  12
 linux-core/i915_buffer.c    |   9
 linux-core/i915_fence.c     |   7
 linux-core/nouveau_sgdma.c  |  16
 linux-core/via_buffer.c     |   3
 linux-core/via_fence.c      |   3
 linux-core/xgi_fence.c      |   2
14 files changed, 618 insertions(+), 205 deletions(-)
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 4618823c..b68efc64 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -535,23 +535,23 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p
 }
 
 static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
-			    unsigned long offset,
-			    int cached)
+			    struct drm_bo_mem_reg *bo_mem)
 {
-	struct drm_agp_ttm_backend *agp_be = 
+	struct drm_agp_ttm_backend *agp_be =
 		container_of(backend, struct drm_agp_ttm_backend, backend);
 	DRM_AGP_MEM *mem = agp_be->mem;
 	int ret;
 
 	DRM_DEBUG("drm_agp_bind_ttm\n");
 	mem->is_flushed = TRUE;
-	mem->type = (cached) ? AGP_USER_CACHED_MEMORY :
+	mem->type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? AGP_USER_CACHED_MEMORY :
 		AGP_USER_MEMORY;
-	ret = drm_agp_bind_memory(mem, offset);
+	ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
 	if (ret) {
 		DRM_ERROR("AGP Bind memory failed\n");
 	}
-	DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
+	DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
+			DRM_BE_FLAG_BOUND_CACHED : 0,
 			DRM_BE_FLAG_BOUND_CACHED);
 	return ret;
 }
@@ -643,7 +643,8 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
 	agp_be->bridge = dev->agp->bridge;
 	agp_be->populated = FALSE;
 	agp_be->backend.func = &agp_ttm_backend;
-	agp_be->backend.mem_type = DRM_BO_MEM_TT;
+	//	agp_be->backend.mem_type = DRM_BO_MEM_TT;
+	agp_be->backend.dev = dev;
 
 	return &agp_be->backend;
 }
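With this change the TTM backend bind() hook receives the whole drm_bo_mem_reg instead of an (offset, cached) pair, so a backend derives both the aperture offset and the caching policy itself. A minimal sketch of a driver backend's bind() under the new interface; everything prefixed my_ is hypothetical, not part of this patch:

#include "drmP.h"
#include "drm_objects.h"

static int my_backend_bind(struct drm_ttm_backend *backend,
			   struct drm_bo_mem_reg *bo_mem)
{
	/* The aperture page offset now comes from the region's mm_node... */
	unsigned long page_offset = bo_mem->mm_node->start;
	/* ...and the caching policy from the region flags. */
	int cached = (bo_mem->flags & DRM_BO_FLAG_CACHED) != 0;

	/* A real backend would program its GART here using
	 * page_offset and cached; this sketch only shows the plumbing. */
	(void)page_offset;
	(void)cached;
	return 0;
}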
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index a2f66dc6..4e735770 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -142,12 +142,8 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
 
 	switch (bo->type) {
 	case drm_bo_type_dc:
-		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
-		if (!bo->ttm)
-			ret = -ENOMEM;
-		break;
 	case drm_bo_type_kernel:
-		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
+		bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
 		if (!bo->ttm)
 			ret = -ENOMEM;
 		break;
@@ -175,7 +171,8 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 	struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
 	int ret = 0;
 
-	if (old_is_pci || new_is_pci)
+	if (old_is_pci || new_is_pci ||
+	    ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
 		ret = drm_bo_vm_pre_move(bo, old_is_pci);
 	if (ret)
 		return ret;
@@ -190,9 +187,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 			goto out_err;
 
 		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
-			ret = drm_bind_ttm(bo->ttm, new_man->flags &
-					   DRM_BO_FLAG_CACHED,
-					   mem->mm_node->start);
+			ret = drm_bind_ttm(bo->ttm, mem);
 			if (ret)
 				goto out_err;
 		}
@@ -242,7 +237,9 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 			_DRM_BO_FLAG_EVICTED);
 
 	if (bo->mem.mm_node)
-		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
+		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+			bm->man[bo->mem.mem_type].gpu_offset;
+
 
 	return 0;
@@ -290,6 +287,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_bo_wait);
 
 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
 {
@@ -417,7 +415,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
 
 		atomic_dec(&bm->count);
 
-		BUG_ON(!list_empty(&bo->base.list));
+		//		BUG_ON(!list_empty(&bo->base.list));
 		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
 
 		return;
@@ -503,6 +501,7 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
 		drm_bo_destroy_locked(tmp_bo);
 	}
 }
+EXPORT_SYMBOL(drm_bo_usage_deref_locked);
 
 static void drm_bo_base_deref_locked(struct drm_file * file_priv,
 				     struct drm_user_object * uo)
@@ -531,37 +530,80 @@ void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
 }
 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
 
+void drm_putback_buffer_objects(struct drm_device *dev)
+{
+	struct drm_buffer_manager *bm = &dev->bm;
+	struct list_head *list = &bm->unfenced;
+	struct drm_buffer_object *entry, *next;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry_safe(entry, next, list, lru) {
+		atomic_inc(&entry->usage);
+		mutex_unlock(&dev->struct_mutex);
+
+		mutex_lock(&entry->mutex);
+		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+		mutex_lock(&dev->struct_mutex);
+
+		list_del_init(&entry->lru);
+		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+		DRM_WAKEUP(&entry->event_queue);
+
+		/*
+		 * FIXME: Might want to put back on head of list
+		 * instead of tail here.
+		 */
+
+		drm_bo_add_to_lru(entry);
+		mutex_unlock(&entry->mutex);
+		drm_bo_usage_deref_locked(&entry);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_putback_buffer_objects);
+
+
 /*
  * Note. The caller has to register (if applicable)
  * and deregister fence object usage.
  */
 
-int drm_fence_buffer_objects(struct drm_file * file_priv,
+int drm_fence_buffer_objects(struct drm_device *dev,
 			     struct list_head *list,
-			     uint32_t fence_class, uint32_t fence_flags,
+			     uint32_t fence_flags,
 			     struct drm_fence_object * fence,
 			     struct drm_fence_object ** used_fence)
 {
-	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_manager *bm = &dev->bm;
-
 	struct drm_buffer_object *entry;
 	uint32_t fence_type = 0;
+	uint32_t fence_class = ~0;
 	int count = 0;
 	int ret = 0;
 	struct list_head *l;
-	LIST_HEAD(f_list);
 
 	mutex_lock(&dev->struct_mutex);
 
 	if (!list)
 		list = &bm->unfenced;
 
+	if (fence)
+		fence_class = fence->fence_class;
+
 	list_for_each_entry(entry, list, lru) {
 		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
-		fence_type |= entry->fence_type;
-		if (entry->fence_class == fence_class)
-			count++;
+		fence_type |= entry->new_fence_type;
+		if (fence_class == ~0)
+			fence_class = entry->new_fence_class;
+		else if (entry->new_fence_class != fence_class) {
+			DRM_ERROR("Unmatching fence classes on unfenced list: "
+				  "%d and %d.\n",
+				  fence_class,
+				  entry->new_fence_class);
+			ret = -EINVAL;
+			goto out;
+		}
+		count++;
 	}
 
 	if (!count) {
@@ -569,14 +611,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		goto out;
 	}
 
-	/*
-	 * Transfer to a local list before we release the dev->struct_mutex;
-	 * This is so we don't get any new unfenced objects while fencing
-	 * the ones we already have..
-	 */
-
-	list_splice_init(list, &f_list);
-
 	if (fence) {
 		if ((fence_type & fence->type) != fence_type ||
 		    (fence->fence_class != fence_class)) {
@@ -596,8 +630,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 	}
 
 	count = 0;
-	l = f_list.next;
-	while (l != &f_list) {
+	l = list->next;
+	while (l != list) {
 		prefetch(l->next);
 		entry = list_entry(l, struct drm_buffer_object, lru);
 		atomic_inc(&entry->usage);
@@ -611,6 +645,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 			if (entry->fence)
 				drm_fence_usage_deref_locked(&entry->fence);
 			entry->fence = drm_fence_reference_locked(fence);
+			entry->fence_class = entry->new_fence_class;
+			entry->fence_type = entry->new_fence_type;
 			DRM_FLAG_MASKED(entry->priv_flags, 0,
 					_DRM_BO_FLAG_UNFENCED);
 			DRM_WAKEUP(&entry->event_queue);
@@ -618,7 +654,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		}
 		mutex_unlock(&entry->mutex);
 		drm_bo_usage_deref_locked(&entry);
-		l = f_list.next;
+		l = list->next;
 	}
 	DRM_DEBUG("Fenced %d buffers\n", count);
       out:
@@ -626,7 +662,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 	*used_fence = fence;
 	return ret;
 }
-
 EXPORT_SYMBOL(drm_fence_buffer_objects);
 
 /*
@@ -941,6 +976,7 @@ struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
 	atomic_inc(&bo->usage);
 	return bo;
 }
+EXPORT_SYMBOL(drm_lookup_buffer_object);
 
 /*
  * Call bo->mutex locked.
@@ -1076,9 +1112,12 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
 				struct drm_bo_info_rep *rep)
 {
+	if (!rep)
+		return;
+
 	rep->handle = bo->base.hash.key;
 	rep->flags = bo->mem.flags;
-	rep->size = bo->mem.num_pages * PAGE_SIZE;
+	rep->size = bo->num_pages * PAGE_SIZE;
 	rep->offset = bo->offset;
 	rep->arg_handle = bo->map_list.user_token;
 	rep->mask = bo->mem.mask;
@@ -1257,7 +1296,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
 	if (ret)
 		return ret;
 
-	mem.num_pages = bo->mem.num_pages;
+	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.mask = new_mem_flags;
 	mem.page_alignment = bo->mem.page_alignment;
@@ -1305,7 +1344,7 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
 	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
 		return 0;
 	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-	    (!(mem->mask & DRM_BO_FLAG_CACHED) ||
+	    (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
 	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
 	  return 0;
 	}
@@ -1372,7 +1411,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 		  (unsigned long long) bo->mem.mask,
 		  (unsigned long long) bo->mem.flags);
 
-	ret = driver->fence_type(bo, &ftype);
+	ret = driver->fence_type(bo, &fence_class, &ftype);
 
 	if (ret) {
 		DRM_ERROR("Driver did not support given buffer permissions\n");
@@ -1401,13 +1440,15 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 			return ret;
 	}
 
-	
-	bo->fence_class = fence_class;
-	bo->fence_type = ftype;
+
+	bo->new_fence_class = fence_class;
+	bo->new_fence_type = ftype;
+
 	ret = drm_bo_wait_unmapped(bo, no_wait);
-	if (ret)
+	if (ret) {
+	        DRM_ERROR("Timed out waiting for buffer unmap.\n");
 		return ret;
-
+	}
 	if (bo->type == drm_bo_type_fake) {
 		ret = drm_bo_check_fake(dev, &bo->mem);
 		if (ret)
@@ -1462,23 +1503,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 	return 0;
 }
 
-static int drm_bo_handle_validate(struct drm_file *file_priv,
-				  uint32_t handle,
-				  uint32_t fence_class,
-				  uint64_t flags, uint64_t mask, uint32_t hint,
-				  struct drm_bo_info_rep *rep)
+int drm_bo_do_validate(struct drm_buffer_object *bo,
+		       uint64_t flags, uint64_t mask, uint32_t hint,
+		       uint32_t fence_class,
+		       int no_wait,
+		       struct drm_bo_info_rep *rep)
 {
-	struct drm_device *dev = file_priv->head->dev;
-	struct drm_buffer_object *bo;
 	int ret;
-	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-	if (!bo) {
-		return -EINVAL;
-	}
 
 	mutex_lock(&bo->mutex);
 	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
@@ -1486,24 +1517,56 @@ static int drm_bo_handle_validate(struct drm_file *file_priv,
 	if (ret)
 		goto out;
 
+
 	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
 	ret = drm_bo_new_mask(bo, flags, hint);
 	if (ret)
 		goto out;
 
-	ret =
-	    drm_buffer_object_validate(bo, fence_class,
-				       !(hint & DRM_BO_HINT_DONT_FENCE),
-				       no_wait);
-	drm_bo_fill_rep_arg(bo, rep);
-
-      out:
+	ret = drm_buffer_object_validate(bo,
+					 fence_class,
+					 !(hint & DRM_BO_HINT_DONT_FENCE),
+					 no_wait);
+out:
+	if (rep)
+		drm_bo_fill_rep_arg(bo, rep);
 	mutex_unlock(&bo->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_bo_do_validate);
+
+
+int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
+			   uint32_t fence_class,
+			   uint64_t flags, uint64_t mask, uint32_t hint,
+			   struct drm_bo_info_rep * rep,
+			   struct drm_buffer_object **bo_rep)
+{
+	struct drm_device *dev = file_priv->head->dev;
+	struct drm_buffer_object *bo;
+	int ret;
+	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+
+	mutex_lock(&dev->struct_mutex);
+	bo = drm_lookup_buffer_object(file_priv, handle, 1);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (!bo) {
+		return -EINVAL;
+	}
+
+	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
+				 no_wait, rep);
+
+	if (!ret && bo_rep)
+		*bo_rep = bo;
+	else
+		drm_bo_usage_deref_unlocked(&bo);
 
-	drm_bo_usage_deref_unlocked(&bo);
 	return ret;
 }
+EXPORT_SYMBOL(drm_bo_handle_validate);
 
 /**
  * Fills out the generic buffer object ioctl reply with the information for
@@ -1609,8 +1672,9 @@ int drm_buffer_object_create(struct drm_device *dev,
 #endif
 	bo->dev = dev;
 	bo->type = type;
+	bo->num_pages = num_pages;
 	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-	bo->mem.num_pages = num_pages;
+	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
 	if (bo->type == drm_bo_type_fake) {
@@ -1637,7 +1701,7 @@ int drm_buffer_object_create(struct drm_device *dev,
 	}
 	bo->fence_class = 0;
-	ret = driver->fence_type(bo, &bo->fence_type);
+	ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
 	if (ret) {
 		DRM_ERROR("Driver did not support given buffer permissions\n");
 		goto out_err;
@@ -1739,7 +1803,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
 						     req->bo_req.flags,
 						     req->bo_req.mask,
 						     req->bo_req.hint,
-						     &rep);
+						     &rep, NULL);
 			break;
 		case drm_bo_fence:
 			ret = -EINVAL;
@@ -2089,9 +2153,30 @@ static void drm_bo_clean_unfenced(struct drm_device *dev)
 	struct drm_buffer_manager *bm  = &dev->bm;
 	struct list_head *head, *list;
 	struct drm_buffer_object *entry;
+	struct drm_fence_object *fence;
 
 	head = &bm->unfenced;
 
+	if (list_empty(head))
+		return;
+
+	DRM_ERROR("Clean unfenced\n");
+
+	if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
+
+		/*
+		 * Fixme: Should really wait here.
+		 */
+	}
+
+	if (fence)
+		drm_fence_usage_deref_locked(&fence);
+
+	if (list_empty(head))
+		return;
+
+	DRM_ERROR("Really clean unfenced\n");
+
 	list = head->next;
 	while(list != head) {
 		prefetch(list->next);
@@ -2251,7 +2336,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
 
 	if (!man->has_type) {
 		DRM_ERROR("Trying to take down uninitialized "
-			  "memory manager type\n");
+			  "memory manager type %u\n", mem_type);
 		return ret;
 	}
 	man->use_type = 0;
@@ -2273,6 +2358,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
 
 	return ret;
 }
+EXPORT_SYMBOL(drm_bo_clean_mm);
 
 /**
  *Evict all buffers of a particular mem_type, but leave memory manager
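Validation is now split in two: drm_bo_handle_validate() keeps the user-handle lookup, while the exported drm_bo_do_validate() operates directly on a buffer object, and the rep argument may be NULL. A sketch of an in-kernel caller under those assumptions (the placement flags chosen here are illustrative, and my_validate_tt is hypothetical):

/* Validate an already-referenced buffer into TT memory without
 * going through a user-space handle. Passing NULL for rep is
 * allowed now that drm_bo_fill_rep_arg() checks for it. */
static int my_validate_tt(struct drm_buffer_object *bo)
{
	return drm_bo_do_validate(bo,
				  DRM_BO_FLAG_MEM_TT,	/* flags */
				  DRM_BO_MASK_MEM,	/* mask */
				  0,			/* hint */
				  0,			/* fence_class */
				  1,			/* no_wait */
				  NULL);		/* rep */
}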
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 1a613916..2a35d45b 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -71,9 +71,7 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo,
 		save_flags = old_mem->flags;
 	}
 	if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
-		ret = drm_bind_ttm(ttm,
-				   new_mem->flags & DRM_BO_FLAG_CACHED,
-				   new_mem->mm_node->start);
+		ret = drm_bind_ttm(ttm, new_mem);
 		if (ret)
 			return ret;
 	}
@@ -344,6 +342,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 	ret = drm_fence_object_create(dev, fence_class, fence_type,
 				      fence_flags | DRM_FENCE_FLAG_EMIT,
 				      &bo->fence);
+	bo->fence_type = fence_type;
 	if (ret)
 		return ret;
 
@@ -410,3 +409,194 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 }
 
 EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
+
+int drm_bo_same_page(unsigned long offset,
+		     unsigned long offset2)
+{
+	return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
+}
+EXPORT_SYMBOL(drm_bo_same_page);
+
+unsigned long drm_bo_offset_end(unsigned long offset,
+				unsigned long end)
+{
+
+	offset = (offset + PAGE_SIZE) & PAGE_MASK;
+	return (end < offset) ? end : offset;
+}
+EXPORT_SYMBOL(drm_bo_offset_end);
+
+
+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
+{
+	pgprot_t tmp = PAGE_KERNEL;
+
+#if defined(__i386__) || defined(__x86_64__)
+#ifdef USE_PAT_WC
+#warning using pat
+	if (drm_use_pat() && map_type == _DRM_TTM) {
+		pgprot_val(tmp) |= _PAGE_PAT;
+		return tmp;
+	}
+#endif
+	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+		pgprot_val(tmp) |= _PAGE_PCD;
+		pgprot_val(tmp) &= ~_PAGE_PWT;
+	}
+#elif defined(__powerpc__)
+	pgprot_val(tmp) |= _PAGE_NO_CACHE;
+	if (map_type == _DRM_REGISTERS)
+		pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+	if (map_type == _DRM_TTM)
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
+			  unsigned long bus_offset, unsigned long bus_size,
+			  struct drm_bo_kmap_obj *map)
+{
+	struct drm_device *dev = bo->dev;
+	struct drm_bo_mem_reg *mem = &bo->mem;
+	struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+
+	if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+		map->bo_kmap_type = bo_map_premapped;
+		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+	} else {
+		map->bo_kmap_type = bo_map_iomap;
+		map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page,
+			   unsigned long num_pages, struct drm_bo_kmap_obj *map)
+{
+	struct drm_device *dev = bo->dev;
+	struct drm_bo_mem_reg *mem = &bo->mem;
+	struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+	pgprot_t prot;
+	struct drm_ttm *ttm = bo->ttm;
+	struct page *d;
+	int i;
+
+	BUG_ON(!ttm);
+
+	if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
+
+		/*
+		 * We're mapping a single page, and the desired
+		 * page protection is consistent with the bo.
+		 */
+
+		map->bo_kmap_type = bo_map_kmap;
+		map->page = drm_ttm_get_page(ttm, start_page);
+		map->virtual = kmap(map->page);
+	} else {
+		/*
+		 * Populate the part we're mapping;
+		 */
+
+		for (i = start_page; i < start_page + num_pages; ++i) {
+			d = drm_ttm_get_page(ttm, i);
+			if (!d)
+				return -ENOMEM;
+		}
+
+		/*
+		 * We need to use vmap to get the desired page protection
+		 * or to make the buffer object look contiguous.
+		 */
+
+		prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
+			PAGE_KERNEL :
+			drm_kernel_io_prot(man->drm_bus_maptype);
+		map->bo_kmap_type = bo_map_vmap;
+		map->virtual = vmap(ttm->pages + start_page,
+				    num_pages, 0, prot);
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+/*
+ * This function is to be used for kernel mapping of buffer objects.
+ * It chooses the appropriate mapping method depending on the memory type
+ * and caching policy the buffer currently has.
+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
+ * consumes vmalloc space. Be restrictive with such mappings.
+ * Mapping single pages usually returns the logical kernel address
+ * (which is fast), but may use slower temporary mappings for high memory pages
+ * or uncached / write-combined pages.
+ *
+ * The function fills in a drm_bo_kmap_obj which can be used to return the
+ * kernel virtual address of the buffer.
+ *
+ * Code servicing a non-privileged user request is only allowed to map one
+ * page at a time. We might need to implement a better scheme to stop such
+ * processes from consuming all vmalloc space.
+ */
+
+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
+		unsigned long num_pages, struct drm_bo_kmap_obj *map)
+{
+	int ret;
+	unsigned long bus_base;
+	unsigned long bus_offset;
+	unsigned long bus_size;
+
+	map->virtual = NULL;
+
+	if (num_pages > bo->num_pages)
+		return -EINVAL;
+	if (start_page > bo->num_pages)
+		return -EINVAL;
+#if 0
+	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
+#endif
+	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+				&bus_offset, &bus_size);
+
+	if (ret)
+		return ret;
+
+	if (bus_size == 0) {
+		return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
+	} else {
+		bus_offset += start_page << PAGE_SHIFT;
+		bus_size = num_pages << PAGE_SHIFT;
+		return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+	}
+}
+EXPORT_SYMBOL(drm_bo_kmap);
+
+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
+{
+	if (!map->virtual)
+		return;
+
+	switch(map->bo_kmap_type) {
+	case bo_map_iomap:
+		iounmap(map->virtual);
+		break;
+	case bo_map_vmap:
+		vunmap(map->virtual);
+		break;
+	case bo_map_kmap:
+		kunmap(map->page);
+		break;
+	case bo_map_premapped:
+		break;
+	default:
+		BUG();
+	}
+	map->virtual = NULL;
+	map->page = NULL;
+}
+EXPORT_SYMBOL(drm_bo_kunmap);
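The new drm_bo_kmap()/drm_bo_kunmap() pair replaces open-coded ioremap of buffer memory; drm_bmo_virtual() (added to drm_objects.h further down) tells the caller whether the mapping landed in io memory. A sketch of a typical single-page use, assuming the buffer is already idle; my_clear_first_page is hypothetical:

static int my_clear_first_page(struct drm_buffer_object *bo)
{
	struct drm_bo_kmap_obj kmap;
	void *virtual;
	int is_iomem;
	int ret;

	ret = drm_bo_kmap(bo, 0, 1, &kmap);	/* map page 0 only */
	if (ret)
		return ret;

	virtual = drm_bmo_virtual(&kmap, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
	else
		memset(virtual, 0, PAGE_SIZE);

	drm_bo_kunmap(&kmap);
	return 0;
}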
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 870f8b73..f74f4bc2 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -193,7 +193,10 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
 extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
 
 #ifndef GFP_DMA32
-#define GFP_DMA32 0
+#define GFP_DMA32 GFP_KERNEL
+#endif
+#ifndef __GFP_DMA32
+#define __GFP_DMA32 GFP_KERNEL
 #endif
 
 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index a6787b09..c25ff3b8 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -35,7 +35,7 @@
  */
 
 void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
-		       uint32_t sequence, uint32_t type)
+		       uint32_t sequence, uint32_t type, uint32_t error)
 {
 	int wake = 0;
 	uint32_t diff;
@@ -49,6 +49,7 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
 	int is_exe = (type & DRM_FENCE_TYPE_EXE);
 	int ge_last_exe;
 
+
 	diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
 
 	if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
@@ -57,9 +58,6 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
 	diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
 	ge_last_exe = diff < driver->wrap_diff;
 
-	if (ge_last_exe)
-		fc->pending_flush &= ~type;
-
 	if (is_exe && ge_last_exe) {
 		fc->last_exe_flush = sequence;
 	}
@@ -75,36 +73,66 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
 		}
 	}
 
+	fc->pending_flush &= ~type;
 	head = (found) ? &fence->ring : &fc->ring;
 
 	list_for_each_entry_safe_reverse(fence, next, head, ring) {
 		if (&fence->ring == &fc->ring)
 			break;
 
+		if (error) {
+			fence->error = error;
+			fence->signaled = fence->type;
+			fence->submitted_flush = fence->type;
+			fence->flush_mask = fence->type;
+			list_del_init(&fence->ring);
+			wake = 1;
+			break;
+		}
+
 		type |= fence->native_type;
 		relevant = type & fence->type;
 
 		if ((fence->signaled | relevant) != fence->signaled) {
 			fence->signaled |= relevant;
+			fence->flush_mask |= relevant;
+			fence->submitted_flush |= relevant;
 			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
 				  fence->base.hash.key, fence->signaled);
-			fence->submitted_flush |= relevant;
 			wake = 1;
 		}
 
 		relevant = fence->flush_mask &
-		    ~(fence->signaled | fence->submitted_flush);
+			~(fence->submitted_flush | fence->signaled);
 
-		if (relevant) {
-			fc->pending_flush |= relevant;
-			fence->submitted_flush = fence->flush_mask;
-		}
+		fc->pending_flush |= relevant;
+		fence->submitted_flush |= relevant;
 
 		if (!(fence->type & ~fence->signaled)) {
 			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
 				  fence->base.hash.key);
 			list_del_init(&fence->ring);
 		}
+
+	}
+
+	/*
+	 * Reinstate lost flush flags.
+	 */
+
+	if ((fc->pending_flush & type) != type) {
+	        head = head->prev;
+		list_for_each_entry(fence, head, ring) {
+			if (&fence->ring == &fc->ring)
+				break;
+	    		diff = (fc->last_exe_flush - fence->sequence) &
+				driver->sequence_mask;
+			if (diff > driver->wrap_diff)
+				break;
+
+			relevant = fence->submitted_flush & ~fence->signaled;
+			fc->pending_flush |= relevant;
+		}
 	}
 
 	if (wake) {
@@ -141,6 +169,7 @@ void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
 		drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
 	}
 }
+EXPORT_SYMBOL(drm_fence_usage_deref_locked);
 
 void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
 {
@@ -160,6 +189,7 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
 		mutex_unlock(&dev->struct_mutex);
 	}
 }
+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
 
 struct drm_fence_object
 *drm_fence_reference_locked(struct drm_fence_object *src)
@@ -178,7 +208,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
 	atomic_inc(&src->usage);
 	mutex_unlock(&src->dev->struct_mutex);
 }
-
+EXPORT_SYMBOL(drm_fence_reference_unlocked);
 
 static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
 {
@@ -206,6 +236,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,
 
 	return signaled;
 }
+EXPORT_SYMBOL(drm_fence_object_signaled);
 
 static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
 				struct drm_fence_driver * driver, uint32_t sequence)
@@ -241,7 +272,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 
 	write_lock_irqsave(&fm->lock, flags);
 	fence->flush_mask |= type;
-	if (fence->submitted_flush == fence->signaled) {
+	if ((fence->submitted_flush & fence->signaled)
+	    == fence->submitted_flush) {
 		if ((fence->type & DRM_FENCE_TYPE_EXE) &&
 		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
 			drm_fence_flush_exe(fc, driver, fence->sequence);
@@ -329,7 +361,15 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence,
 		if (ret == -EBUSY) {
 			DRM_ERROR("Fence timeout. "
 				  "GPU lockup or fence driver was "
-				  "taken down.\n");
+				  "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n",
+				  fence->fence_class,
+				  fence->sequence,
+				  fence->type,
+				  mask,
+				  fence->signaled);
+			DRM_ERROR("Pending exe flush %d 0x%08x\n",
+				  fc->pending_exe_flush,
+				  fc->exe_flush_sequence);
 		}
 		return ((ret == -EINTR) ? -EAGAIN : ret);
 	}
@@ -348,6 +388,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 	if (mask & ~fence->type) {
 		DRM_ERROR("Wait trying to extend fence type"
 			  " 0x%08x 0x%08x\n", mask, fence->type);
+		BUG();
 		return -EINVAL;
 	}
@@ -402,9 +443,11 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_fence_object_wait);
+
 
 int drm_fence_object_emit(struct drm_fence_object * fence,
-			  uint32_t fence_flags, uint32_t class, uint32_t type)
+			  uint32_t fence_flags, uint32_t fence_class, uint32_t type)
 {
 	struct drm_device *dev = fence->dev;
 	struct drm_fence_manager *fm = &dev->fm;
@@ -416,12 +459,12 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
 	int ret;
 
 	drm_fence_unring(dev, &fence->ring);
-	ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
+	ret = driver->emit(dev, fence_class, fence_flags, &sequence, &native_type);
 	if (ret)
 		return ret;
 
 	write_lock_irqsave(&fm->lock, flags);
-	fence->fence_class = class;
+	fence->fence_class = fence_class;
 	fence->type = type;
 	fence->flush_mask = 0x00;
 	fence->submitted_flush = 0x00;
@@ -434,8 +477,9 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
 	write_unlock_irqrestore(&fm->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL(drm_fence_object_emit);
 
-static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
+static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
 				 uint32_t type,
 				 uint32_t fence_flags,
 				 struct drm_fence_object * fence)
@@ -456,7 +500,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
 	 */
 
 	INIT_LIST_HEAD(&fence->base.list);
-	fence->fence_class = class;
+	fence->fence_class = fence_class;
 	fence->type = type;
 	fence->flush_mask = 0;
 	fence->submitted_flush = 0;
@@ -491,7 +535,7 @@ out:
 }
 EXPORT_SYMBOL(drm_fence_add_user_object);
 
-int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type,
+int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type,
 			    unsigned flags, struct drm_fence_object ** c_fence)
 {
 	struct drm_fence_object *fence;
@@ -501,7 +545,7 @@ int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t ty
 	fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
 	if (!fence)
 		return -ENOMEM;
-	ret = drm_fence_object_init(dev, class, type, flags, fence);
+	ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
 	if (ret) {
 		drm_fence_usage_deref_unlocked(&fence);
 		return ret;
@@ -517,7 +561,7 @@ EXPORT_SYMBOL(drm_fence_object_create);
 void drm_fence_manager_init(struct drm_device * dev)
 {
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *class;
+	struct drm_fence_class_manager *fence_class;
 	struct drm_fence_driver *fed = dev->driver->fence_driver;
 	int i;
 	unsigned long flags;
@@ -533,11 +577,11 @@ void drm_fence_manager_init(struct drm_device * dev)
 	BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
 
 	for (i=0; i<fm->num_classes; ++i) {
-	    class = &fm->fence_class[i];
+	    fence_class = &fm->fence_class[i];
 
-	    INIT_LIST_HEAD(&class->ring);
-	    class->pending_flush = 0;
-	    DRM_INIT_WAITQUEUE(&class->fence_queue);
+	    INIT_LIST_HEAD(&fence_class->ring);
+	    fence_class->pending_flush = 0;
+	    DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
 	}
 
 	atomic_set(&fm->count, 0);
@@ -545,6 +589,24 @@ void drm_fence_manager_init(struct drm_device * dev)
 	write_unlock_irqrestore(&fm->lock, flags);
 }
 
+void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg)
+{
+	struct drm_device *dev = fence->dev;
+	struct drm_fence_manager *fm = &dev->fm;
+	unsigned long irq_flags;
+
+	read_lock_irqsave(&fm->lock, irq_flags);
+	arg->handle = fence->base.hash.key;
+	arg->fence_class = fence->fence_class;
+	arg->type = fence->type;
+	arg->signaled = fence->signaled;
+	arg->error = fence->error;
+	arg->sequence = fence->sequence;
+	read_unlock_irqrestore(&fm->lock, irq_flags);
+}
+EXPORT_SYMBOL(drm_fence_fill_arg);
+
+
 void drm_fence_manager_takedown(struct drm_device * dev)
 {
 }
@@ -572,7 +634,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -597,14 +658,11 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
 
 	/*
 	 * usage > 0. No need to lock dev->struct_mutex;
 	 */
-	
+
 	arg->handle = fence->base.hash.key;
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->fence_class = fence->fence_class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -642,7 +700,6 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
 	struct drm_user_object *uo;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -654,12 +711,7 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil
 	if (ret)
 		return ret;
 	fence = drm_lookup_fence_object(file_priv, arg->handle);
-
-	read_lock_irqsave(&fm->lock, flags);
-	arg->fence_class = fence->fence_class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -687,7 +739,6 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -699,11 +750,7 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file
 	if (!fence)
 		return -EINVAL;
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->fence_class = fence->fence_class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -715,7 +762,6 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -728,11 +774,7 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f
 		return -EINVAL;
 	ret = drm_fence_object_flush(fence, arg->type);
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->fence_class = fence->fence_class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -745,7 +787,6 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -760,11 +801,7 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 				    arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
 				    0, arg->type);
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->fence_class = fence->fence_class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -777,7 +814,6 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -792,11 +828,7 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
 				    arg->type);
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->fence_class = fence->fence_class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -808,7 +840,6 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -821,24 +852,22 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return -EINVAL;
 	}
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
-	ret = drm_fence_buffer_objects(file_priv, NULL, arg->fence_class,
-				       arg->flags, NULL, &fence);
-	if (ret)
-		return ret;
-	ret = drm_fence_add_user_object(file_priv, fence,
-					arg->flags &
-					DRM_FENCE_FLAG_SHAREABLE);
+	ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
+				       NULL, &fence);
 	if (ret)
 		return ret;
 
+	if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
+		ret = drm_fence_add_user_object(file_priv, fence,
+						arg->flags &
+						DRM_FENCE_FLAG_SHAREABLE);
+		if (ret)
+			return ret;
+	}
+
 	arg->handle = fence->base.hash.key;
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->fence_class = fence->fence_class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	arg->sequence = fence->sequence;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
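drm_fence_handler() gains an error argument: a non-zero value marks the fence it reaches as completely signaled, records it in the new fence->error field, and wakes waiters, and drm_fence_fill_arg() then propagates the error to user space. A sketch of how a driver's flush path might use this on a detected lockup; my_report_lockup and the error value are illustrative, not from this patch:

static void my_report_lockup(struct drm_device *dev, uint32_t sequence)
{
	/* class 0, EXE type; the non-zero error fails the fence so
	 * waiters return instead of timing out. */
	drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE, -EBUSY);
}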
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
index 3d866333..6bd89b1d 100644
--- a/linux-core/drm_object.c
+++ b/linux-core/drm_object.c
@@ -50,6 +50,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
 	list_add_tail(&item->list, &priv->user_objects);
 	return 0;
 }
+EXPORT_SYMBOL(drm_add_user_object);
 
 struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key)
 {
@@ -76,6 +77,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t
 	}
 	return item;
 }
+EXPORT_SYMBOL(drm_lookup_user_object);
 
 static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item)
 {
@@ -104,6 +106,7 @@ int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item
 	drm_deref_user_object(priv, item);
 	return 0;
 }
+EXPORT_SYMBOL(drm_remove_user_object);
 
 static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro,
 				 enum drm_ref_type action)
@@ -196,6 +199,7 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
 
 	return drm_hash_entry(hash, struct drm_ref_object, hash);
 }
+EXPORT_SYMBOL(drm_lookup_ref_object);
 
 static void drm_remove_other_references(struct drm_file * priv,
 					struct drm_user_object * ro)
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index b2f1ae17..9748baae 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -32,6 +32,7 @@
 #define _DRM_OBJECTS_H
 
 struct drm_device;
+struct drm_bo_mem_reg;
 
 /***************************************************
  * User space objects. (drm_object.c)
@@ -42,10 +43,14 @@ struct drm_device;
 enum drm_object_type {
 	drm_fence_type,
 	drm_buffer_type,
-	drm_ttm_type
 	    /*
 	     * Add other user space object types here.
 	     */
+	drm_driver_type0 = 256,
+	drm_driver_type1,
+	drm_driver_type2,
+	drm_driver_type3,
+	drm_driver_type4
 };
 
 /*
@@ -156,6 +161,7 @@ struct drm_fence_object {
 	uint32_t sequence;
 	uint32_t flush_mask;
 	uint32_t submitted_flush;
+	uint32_t error;
 };
 
 #define _DRM_FENCE_CLASSES 8
@@ -192,7 +198,7 @@ struct drm_fence_driver {
 };
 
 extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
-			      uint32_t sequence, uint32_t type);
+			      uint32_t sequence, uint32_t type, uint32_t error);
 extern void drm_fence_manager_init(struct drm_device *dev);
 extern void drm_fence_manager_takedown(struct drm_device *dev);
 extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
@@ -210,6 +216,12 @@ extern int drm_fence_object_wait(struct drm_fence_object * fence,
 extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
 				   uint32_t fence_flags, uint32_t fence_class,
 				   struct drm_fence_object ** c_fence);
+extern int drm_fence_object_emit(struct drm_fence_object * fence,
+				 uint32_t fence_flags, uint32_t class,
+				 uint32_t type);
+extern void drm_fence_fill_arg(struct drm_fence_object *fence,
+			       struct drm_fence_arg *arg);
+
 extern int drm_fence_add_user_object(struct drm_file * priv,
 				     struct drm_fence_object * fence, int shareable);
@@ -258,23 +270,22 @@ struct drm_ttm_backend_func {
 			 unsigned long num_pages, struct page ** pages);
 	void (*clear) (struct drm_ttm_backend * backend);
 	int (*bind) (struct drm_ttm_backend * backend,
-		     unsigned long offset, int cached);
+		     struct drm_bo_mem_reg * bo_mem);
 	int (*unbind) (struct drm_ttm_backend * backend);
 	void (*destroy) (struct drm_ttm_backend * backend);
 };
 
-struct drm_ttm_backend {
-	uint32_t flags;
-	int mem_type;
-	struct drm_ttm_backend_func *func;
-};
+typedef struct drm_ttm_backend {
+	struct drm_device *dev;
+	uint32_t flags;
+	struct drm_ttm_backend_func *func;
+} drm_ttm_backend_t;
 
 struct drm_ttm {
 	struct page **pages;
 	uint32_t page_flags;
 	unsigned long num_pages;
-	unsigned long aper_offset;
 	atomic_t vma_count;
 	struct drm_device *dev;
 	int destroy;
@@ -290,11 +301,13 @@ struct drm_ttm {
 };
 
 extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
-extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset);
+extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem);
 extern void drm_ttm_unbind(struct drm_ttm * ttm);
 extern void drm_ttm_evict(struct drm_ttm * ttm);
 extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
 extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
+extern void drm_ttm_cache_flush(void);
+extern int drm_ttm_populate(struct drm_ttm * ttm);
 
 /*
  * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
@@ -333,6 +346,8 @@ struct drm_bo_mem_reg {
 	uint32_t mem_type;
 	uint64_t flags;
 	uint64_t mask;
+	uint32_t desired_tile_stride;
+	uint32_t hw_tile_stride;
 };
 
 struct drm_buffer_object {
@@ -356,10 +371,13 @@ struct drm_buffer_object {
 
 	uint32_t fence_type;
 	uint32_t fence_class;
+	uint32_t new_fence_type;
+	uint32_t new_fence_class;
 	struct drm_fence_object *fence;
 	uint32_t priv_flags;
 	wait_queue_head_t event_queue;
 	struct mutex mutex;
+	unsigned long num_pages;
 
 	/* For pinned buffers */
 	int pinned;
@@ -368,7 +386,6 @@ struct drm_buffer_object {
 	struct list_head pinned_lru;
 
 	/* For vm */
-
 	struct drm_ttm *ttm;
 	struct drm_map_list map_list;
 	uint32_t memory_type;
@@ -395,6 +412,7 @@ struct drm_mem_type_manager {
 	struct list_head pinned;
 	uint32_t flags;
 	uint32_t drm_bus_maptype;
+	unsigned long gpu_offset;
 	unsigned long io_offset;
 	unsigned long io_size;
 	void *io_addr;
@@ -434,7 +452,8 @@ struct drm_bo_driver {
 	uint32_t num_mem_busy_prio;
 	struct drm_ttm_backend *(*create_ttm_backend_entry)
 	 (struct drm_device * dev);
-	int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type);
+	int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
+			   uint32_t * type);
 	int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
 	int (*init_mem_type) (struct drm_device * dev, uint32_t type,
 			      struct drm_mem_type_manager * man);
@@ -472,32 +491,44 @@ extern int drm_bo_pci_offset(struct drm_device *dev,
 extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);
 
 extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
-extern int drm_fence_buffer_objects(struct drm_file * priv,
+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo);
+extern void drm_putback_buffer_objects(struct drm_device *dev);
+extern int drm_fence_buffer_objects(struct drm_device * dev,
 				    struct list_head *list,
-				    uint32_t fence_class, uint32_t fence_flags,
+				    uint32_t fence_flags,
 				    struct drm_fence_object * fence,
 				    struct drm_fence_object ** used_fence);
 extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
+extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
+				    enum drm_bo_type type, uint64_t mask,
+				    uint32_t hint, uint32_t page_alignment,
+				    unsigned long buffer_start,
+				    struct drm_buffer_object **bo);
 extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
 		       int no_wait);
 extern int drm_bo_mem_space(struct drm_buffer_object * bo,
 			    struct drm_bo_mem_reg * mem, int no_wait);
 extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
 			      int no_wait, int move_unfenced);
-extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
-				    enum drm_bo_type type, uint64_t mask,
-				    uint32_t hint, uint32_t page_alignment,
-				    unsigned long buffer_start,
-				    struct drm_buffer_object **bo);
-extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
+extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type);
+extern int drm_bo_init_mm(struct drm_device * dev, unsigned type,
 			  unsigned long p_offset, unsigned long p_size);
-extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
-extern int drm_bo_add_user_object(struct drm_file *file_priv,
-				  struct drm_buffer_object *bo, int sharable);
-extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
+extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
+				  uint32_t fence_class, uint64_t flags,
+				  uint64_t mask, uint32_t hint,
+				  struct drm_bo_info_rep * rep,
+				  struct drm_buffer_object **bo_rep);
+extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv,
+							  uint32_t handle,
+							  int check_owner);
+extern int drm_bo_do_validate(struct drm_buffer_object *bo,
+			      uint64_t flags, uint64_t mask, uint32_t hint,
+			      uint32_t fence_class,
+			      int no_wait,
+			      struct drm_bo_info_rep *rep);
 
 /*
- * Buffer object memory move helpers.
+ * Buffer object memory move- and map helpers.
  * drm_bo_move.c
 */
 
@@ -513,11 +544,69 @@ extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 				     uint32_t fence_type,
 				     uint32_t fence_flags,
 				     struct drm_bo_mem_reg * new_mem);
+extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
+extern unsigned long drm_bo_offset_end(unsigned long offset,
+				       unsigned long end);
 
-extern int drm_mem_reg_ioremap(struct drm_device *dev,
-			       struct drm_bo_mem_reg *mem, void **virtual);
-extern void drm_mem_reg_iounmap(struct drm_device *dev,
-				struct drm_bo_mem_reg *mem, void *virtual);
+struct drm_bo_kmap_obj {
+	void *virtual;
+	struct page *page;
+	enum {
+		bo_map_iomap,
+		bo_map_vmap,
+		bo_map_kmap,
+		bo_map_premapped,
+	} bo_kmap_type;
+};
+
+static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
+{
+	*is_iomem = (map->bo_kmap_type == bo_map_iomap ||
+		     map->bo_kmap_type == bo_map_premapped);
+	return map->virtual;
+}
+extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
+extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
+		       unsigned long num_pages, struct drm_bo_kmap_obj *map);
+
+
+/*
+ * drm_regman.c
+ */
+
+struct drm_reg {
+	struct list_head head;
+	struct drm_fence_object *fence;
+	uint32_t fence_type;
+	uint32_t new_fence_type;
+};
+
+struct drm_reg_manager {
+	struct list_head free;
+	struct list_head lru;
+	struct list_head unfenced;
+
+	int (*reg_reusable)(const struct drm_reg *reg, const void *data);
+	void (*reg_destroy)(struct drm_reg *reg);
+};
+
+extern int drm_regs_alloc(struct drm_reg_manager *manager,
+			  const void *data,
+			  uint32_t fence_class,
+			  uint32_t fence_type,
+			  int interruptible,
+			  int no_wait,
+			  struct drm_reg **reg);
+
+extern void drm_regs_fence(struct drm_reg_manager *regs,
+			   struct drm_fence_object *fence);
+
+extern void drm_regs_free(struct drm_reg_manager *manager);
+extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
+extern void drm_regs_init(struct drm_reg_manager *manager,
+			  int (*reg_reusable)(const struct drm_reg *,
+					      const void *),
+			  void (*reg_destroy)(struct drm_reg *));
 
 #ifdef CONFIG_DEBUG_MUTEXES
 #define DRM_ASSERT_LOCKED(_mutex)					\
@@ -526,5 +615,4 @@ extern void drm_mem_reg_iounmap(struct drm_device *dev,
 #else
 #define DRM_ASSERT_LOCKED(_mutex)
 #endif
-
 #endif
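drm_objects.h now also declares a register manager (drm_regman.c) whose users supply two callbacks through drm_regs_init(). A sketch of what a driver might plug in, assuming it embeds struct drm_reg in its own register type; all my_ names are hypothetical:

struct my_reg {
	struct drm_reg base;		/* must be first for the cast below */
	uint32_t value;			/* driver-specific register contents */
};

/* A register is reusable when it already holds the requested value. */
static int my_reg_reusable(const struct drm_reg *reg, const void *data)
{
	const struct my_reg *r = (const struct my_reg *)reg;

	return r->value == *(const uint32_t *)data;
}

static void my_reg_destroy(struct drm_reg *reg)
{
	drm_free(reg, sizeof(struct my_reg), DRM_MEM_DRIVER);
}

A driver would then call drm_regs_init(&manager, my_reg_reusable, my_reg_destroy) and allocate registers with drm_regs_alloc(), fencing them with drm_regs_fence() after submission.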
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 60c64cba..33bbe1d4 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -35,11 +35,12 @@ static void drm_ttm_ipi_handler(void *null)
 	flush_agp_cache();
 }
 
-static void drm_ttm_cache_flush(void)
+void drm_ttm_cache_flush(void)
 {
 	if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
 		DRM_ERROR("Timed out waiting for drm cache flush.\n");
 }
+EXPORT_SYMBOL(drm_ttm_cache_flush);
 
 /*
  * Use kmalloc if possible. Otherwise fall back to vmalloc.
@@ -207,7 +208,7 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
 	return p;
 }
 
-static int drm_ttm_populate(struct drm_ttm * ttm)
+int drm_ttm_populate(struct drm_ttm * ttm)
 {
 	struct page *page;
 	unsigned long i;
@@ -308,7 +309,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm)
 	drm_ttm_fixup_caching(ttm);
 }
 
-int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
+int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
 {
 	int ret = 0;
@@ -325,17 +326,16 @@ int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
 	if (ret)
 		return ret;
 
-	if (ttm->state == ttm_unbound && !cached) {
+	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
 		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
 	}
 
-	if ((ret = be->func->bind(be, aper_offset, cached))) {
+	if ((ret = be->func->bind(be, bo_mem))) {
 		ttm->state = ttm_evicted;
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;
 	}
 
-	ttm->aper_offset = aper_offset;
 	ttm->state = ttm_bound;
 
 	return 0;
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index bf500cc6..75763e71 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -38,7 +38,9 @@ struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev)
 	return drm_agp_init_ttm(dev);
 }
 
-int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type)
+int i915_fence_types(struct drm_buffer_object *bo,
+		     uint32_t * fclass,
+		     uint32_t * type)
 {
 	if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
 		*type = 3;
@@ -71,6 +73,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 		    _DRM_FLAG_MEMTYPE_CACHED;
 		man->drm_bus_maptype = 0;
+		man->gpu_offset = 0;
 		break;
 	case DRM_BO_MEM_TT:
 		if (!(drm_core_has_AGP(dev) && dev->agp)) {
@@ -84,6 +87,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 		    _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
 		man->drm_bus_maptype = _DRM_AGP;
+		man->gpu_offset = 0;
 		break;
 	case DRM_BO_MEM_PRIV0:
 		if (!(drm_core_has_AGP(dev) && dev->agp)) {
@@ -97,6 +101,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 		man->flags =  _DRM_FLAG_MEMTYPE_MAPPABLE |
 		    _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
 		man->drm_bus_maptype = _DRM_AGP;
+		man->gpu_offset = 0;
 		break;
 	default:
 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -196,7 +201,7 @@ static int i915_move_flip(struct drm_buffer_object * bo,
 	if (ret)
 		return ret;
 
-	ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
+	ret = drm_bind_ttm(bo->ttm, &tmp_mem);
 	if (ret)
 		goto out_cleanup;
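The bo_driver fence_type() hook now takes an extra out-parameter for the fence class; this patch leaves the caller-supplied class untouched in i915 and via, but a driver may set it explicitly. A sketch for a single-engine driver (my_fence_types is hypothetical):

static int my_fence_types(struct drm_buffer_object *bo,
			  uint32_t *fclass, uint32_t *type)
{
	*fclass = 0;			/* single command-submission engine */
	*type = DRM_FENCE_TYPE_EXE;	/* only execution completion needed */
	return 0;
}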
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 89830333..a0f22785 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -63,7 +63,8 @@ static void i915_perform_flush(struct drm_device * dev)
 		diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
 		if (diff < driver->wrap_diff && diff != 0) {
-			drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
+		        drm_fence_handler(dev, 0, sequence,
+					  DRM_FENCE_TYPE_EXE, 0);
 		}
 
 		if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
@@ -82,7 +83,7 @@ static void i915_perform_flush(struct drm_device * dev)
 			flush_flags = dev_priv->flush_flags;
 			flush_sequence = dev_priv->flush_sequence;
 			dev_priv->flush_pending = 0;
-			drm_fence_handler(dev, 0, flush_sequence, flush_flags);
+			drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
 		}
 	}
 
@@ -103,7 +104,7 @@ static void i915_perform_flush(struct drm_device * dev)
 			flush_flags = dev_priv->flush_flags;
 			flush_sequence = dev_priv->flush_sequence;
 			dev_priv->flush_pending = 0;
-			drm_fence_handler(dev, 0, flush_sequence, flush_flags);
+			drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
 		}
 	}
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
index 97d5330b..b86c5d7c 100644
--- a/linux-core/nouveau_sgdma.c
+++ b/linux-core/nouveau_sgdma.c
@@ -80,16 +80,16 @@ nouveau_sgdma_clear(struct drm_ttm_backend *be)
 }
 
 static int
-nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start,
-		   int cached)
+nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	uint64_t offset = (pg_start << PAGE_SHIFT);
+	uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
 	uint32_t i;
 
-	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached);
+	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
+		  offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1);
 
 	if (offset & NV_CTXDMA_PAGE_MASK)
 		return -EINVAL;
@@ -188,7 +188,6 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
 	nvbe->dev = dev;
 
 	nvbe->backend.func	= &nouveau_sgdma_backend;
-	nvbe->backend.mem_type	= DRM_BO_MEM_TT;
 
 	return &nvbe->backend;
 }
@@ -278,6 +277,8 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_ttm_backend *be;
 	struct drm_scatter_gather sgreq;
+	struct drm_mm_node mm_node;
+	struct drm_bo_mem_reg mem;
 	int ret;
 
 	dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
@@ -303,7 +304,10 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
 		return ret;
 	}
 
-	if ((ret = be->func->bind(be, 0, 0))) {
+	mm_node.start = 0;
+	mem.mm_node = &mm_node;
+
+	if ((ret = be->func->bind(be, &mem))) {
 		DRM_ERROR("failed bind: %d\n", ret);
 		return ret;
 	}
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
index eb5ea826..a6c59832 100644
--- a/linux-core/via_buffer.c
+++ b/linux-core/via_buffer.c
@@ -37,7 +37,8 @@ struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev)
 	return drm_agp_init_ttm(dev);
 }
 
-int via_fence_types(struct drm_buffer_object *bo, uint32_t * type)
+int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass,
+		    uint32_t * type)
 {
 	*type = 3;
 	return 0;
diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c
index 8d60afa6..9af1bf3b 100644
--- a/linux-core/via_fence.c
+++ b/linux-core/via_fence.c
@@ -98,7 +98,8 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
 				drm_idlelock_release(&dev->lock);
 				dev_priv->have_idlelock = 0;
 			}
-			drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, signaled_flush_types);
+			drm_fence_handler(dev, 0, dev_priv->emit_0_sequence,
+					  signaled_flush_types, 0);
 		}
 	}
diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c
index a98a8422..526bc5db 100644
--- a/linux-core/xgi_fence.c
+++ b/linux-core/xgi_fence.c
@@ -60,7 +60,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
 
 		if (signaled_flush_types) {
 			drm_fence_handler(dev, 0, info->complete_sequence,
-					  signaled_flush_types);
+					  signaled_flush_types, 0);
 		}
 	}
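Taken together, the pieces above reshape a driver's command-submission path: validate buffers, emit commands, then fence the device's unfenced list through the reworked drm_fence_buffer_objects(), falling back to the new drm_putback_buffer_objects() on error. A sketch under those assumptions; my_emit_commands() and my_submit() are hypothetical:

static int my_emit_commands(struct drm_device *dev);	/* hypothetical */

static int my_submit(struct drm_device *dev, struct drm_buffer_object *bo)
{
	struct drm_fence_object *fence;
	int ret;

	/* Validate with fencing requested (no DRM_BO_HINT_DONT_FENCE),
	 * which leaves the buffer on the device's unfenced list. */
	ret = drm_bo_do_validate(bo, DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM,
				 0, 0, 0, NULL);
	if (ret)
		return ret;

	if (my_emit_commands(dev)) {
		/* Return unfenced buffers to the LRU lists. */
		drm_putback_buffer_objects(dev);
		return -EINVAL;
	}

	/* Fence everything on the unfenced list; the fence class is now
	 * derived from the buffers themselves. */
	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
	if (ret)
		return ret;

	drm_fence_usage_deref_unlocked(&fence);
	return 0;
}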
