| author | Jesse Barnes <jbarnes@jbarnes-t61.(none)> | 2008-06-18 15:25:54 -0700 |
| committer | Jesse Barnes <jbarnes@virtuousgeek.org> | 2008-06-18 15:25:54 -0700 |
| commit | 86accbcb344ff25fbb47a788bb0f7464b5cd797f (patch) | |
| tree | d023b852e7dab469bcc1d2b7f37493709e657fe5 /linux-core | |
| parent | c843d47b906e57fb3002af4a609d3cb95c5e195d (diff) | |
| parent | e7424e4580159b0ac3e232674dff5c862e851dff (diff) | |
Merge commit 'origin/drm-gem' into modesetting-gem
Lots of conflicts, seems to load ok, but I'm sure some bugs snuck in.
Conflicts:
linux-core/drmP.h
linux-core/drm_lock.c
linux-core/i915_gem.c
shared-core/drm.h
shared-core/i915_dma.c
shared-core/i915_drv.h
shared-core/i915_irq.c
Diffstat (limited to 'linux-core')
| -rw-r--r-- | linux-core/Makefile | 7 |
| -rw-r--r-- | linux-core/drmP.h | 51 |
| -rw-r--r-- | linux-core/drm_drv.c | 9 |
| -rw-r--r-- | linux-core/drm_gem.c | 386 |
| -rw-r--r-- | linux-core/drm_irq.c | 20 |
| -rw-r--r-- | linux-core/drm_lock.c | 59 |
| -rw-r--r-- | linux-core/drm_proc.c | 7 |
| -rw-r--r-- | linux-core/i915_drv.c | 31 |
| -rw-r--r-- | linux-core/i915_gem.c | 607 |

9 files changed, 502 insertions(+), 675 deletions(-)
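One change worth calling out before the patch itself: in i915_gem.c this merge replaces a timer (mm.retire_timer) plus a separate work item (mm.retire_task) with a single struct delayed_work (mm.retire_work) for retiring completed GPU requests. The sketch below shows the shape of that pattern in isolation; the names retire_example, example_retire_handler, example_init and example_first_request are illustrative stand-ins, not code from the patch:

    #include <linux/kernel.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    /* Hypothetical per-device state; stands in for drm_i915_private. */
    struct retire_example {
            struct delayed_work retire_work;
            int have_pending_requests;      /* stands in for mm.request_list */
    };

    /* Runs out of the shared workqueue roughly HZ jiffies after scheduling. */
    static void example_retire_handler(struct work_struct *work)
    {
            /* A delayed_work embeds a work_struct, hence the ".work" member --
             * the same container_of(work, ..., mm.retire_work.work) dance the
             * new i915_gem_retire_work_handler() does. */
            struct retire_example *ex =
                    container_of(work, struct retire_example, retire_work.work);

            /* ... retire finished requests here ... */

            /* Re-arm only while work is still outstanding, as the patch does. */
            if (ex->have_pending_requests)
                    schedule_delayed_work(&ex->retire_work, HZ);
    }

    static void example_init(struct retire_example *ex)
    {
            INIT_DELAYED_WORK(&ex->retire_work, example_retire_handler);
    }

    static void example_first_request(struct retire_example *ex)
    {
            /* Kick the handler one second out, like i915_add_request() when
             * the request list goes from empty to non-empty. */
            ex->have_pending_requests = 1;
            schedule_delayed_work(&ex->retire_work, HZ);
    }

Compared with the old mod_timer() + schedule_work() pair, a delayed_work needs no separate timer callback firing in interrupt context just to queue the real work, which is presumably why the merge collapses the two into one.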
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 846386a5..a359f775 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -30,7 +30,6 @@
 #
 # make DRM_MODULES="r128 radeon"
 #
-DRM_MODULES=i915
 
 SHELL=/bin/sh
 
@@ -118,7 +117,7 @@ V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \
 ifeq ($(V),"$(RUNNING_REL)")
 HEADERFROMBOOT := 1
-GETCONFIG := MAKEFILES=$(shell pwd)/.config
+GETCONFIG := MAKEFILES=$(shell /bin/pwd)/.config
 HAVECONFIG := y
 endif
 
@@ -165,7 +164,7 @@ endif
 all: modules
 
 modules: includes
-	+make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+	+make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules
 
 ifeq ($(HEADERFROMBOOT),1)
 
@@ -241,7 +240,7 @@ drmstat: drmstat.c
	$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
 
 install:
-	make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
+	make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules_install
 
 else
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index ffe8b8ef..4a9cc761 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -819,21 +819,6 @@ struct drm_driver {
	int (*gem_init_object) (struct drm_gem_object *obj);
	void (*gem_free_object) (struct drm_gem_object *obj);
 
-	/**
-	 * Driver-specific callback to set memory domains from userspace
-	 */
-	int (*gem_set_domain) (struct drm_gem_object *obj,
-			       struct drm_file *file_priv,
-			       uint32_t read_domains,
-			       uint32_t write_domain);
-
-	/**
-	 * Driver-specific callback to flush pwrite through chipset
-	 */
-	int (*gem_flush_pwrite) (struct drm_gem_object *obj,
-				 uint64_t offset,
-				 uint64_t size);
-
	struct drm_fence_driver *fence_driver;
	struct drm_bo_driver *bo_driver;
@@ -1037,6 +1022,12 @@ struct drm_device {
	spinlock_t object_name_lock;
	struct idr object_name_idr;
	atomic_t object_count;
+	atomic_t object_memory;
+	atomic_t pin_count;
+	atomic_t pin_memory;
+	atomic_t gtt_count;
+	atomic_t gtt_memory;
+	uint32_t gtt_total;
	uint32_t invalidate_domains;	/* domains pending invalidation */
	uint32_t flush_domains;		/* domains pending flush */
	/*@} */
@@ -1252,10 +1243,6 @@ extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
 extern void drm_idlelock_take(struct drm_lock_data *lock_data);
 extern void drm_idlelock_release(struct drm_lock_data *lock_data);
-extern int drm_client_lock_take(struct drm_device *dev,
-				struct drm_file *file_priv);
-extern void drm_client_lock_release(struct drm_device *dev,
-				    struct drm_file *file_priv);
 
 /*
  * These are exported to drivers so that they can implement fencing using
@@ -1472,6 +1459,11 @@ static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
	kref_put (&obj->refcount, drm_gem_object_free);
 }
 
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		      struct drm_gem_object *obj,
+		      int *handlep);
+
 static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
 {
	drm_gem_object_reference (obj);
@@ -1495,37 +1487,16 @@ static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj
 struct drm_gem_object *
 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      int handle);
-int drm_gem_create_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
 int drm_gem_close_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
-int drm_gem_pread_ioctl(struct drm_device *dev, void *data,
-			struct drm_file *file_priv);
-int drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv);
-int drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv);
 int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
 int drm_gem_open_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
-int drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
-			     struct drm_file *file_priv);
 void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
 void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
-
-/*
- * Given the new read/write domains for an object,
- * compute the invalidate/flush domains for the whole device.
- *
- */
-int drm_gem_object_set_domain (struct drm_gem_object *object,
-			       uint32_t read_domains,
-			       uint32_t write_domains);
-
-
 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index df09e72b..64ad067a 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -175,15 +175,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
	DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
-
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CREATE, drm_gem_create_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_PREAD, drm_gem_pread_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_PWRITE, drm_gem_pwrite_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_MMAP, drm_gem_mmap_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_GEM_SET_DOMAIN, drm_gem_set_domain_ioctl, DRM_AUTH),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
index a8ecaf76..76d7aa94 100644
--- a/linux-core/drm_gem.c
+++ b/linux-core/drm_gem.c
@@ -74,6 +74,11 @@ drm_gem_init(struct drm_device *dev)
	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
+	atomic_set(&dev->object_memory, 0);
+	atomic_set(&dev->pin_count, 0);
+	atomic_set(&dev->pin_memory, 0);
+	atomic_set(&dev->gtt_count, 0);
+	atomic_set(&dev->gtt_memory, 0);
	return 0;
 }
@@ -99,15 +104,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;
-
-	/*
-	 * We've just allocated pages from the kernel,
-	 * so they've just been written by the CPU with
-	 * zeros. They'll need to be clflushed before we
-	 * use them with the GPU.
-	 */
-	obj->write_domain = DRM_GEM_DOMAIN_CPU;
-	obj->read_domains = DRM_GEM_DOMAIN_CPU;
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		fput(obj->filp);
@@ -115,55 +111,17 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
		return NULL;
	}
	atomic_inc(&dev->object_count);
+	atomic_add(obj->size, &dev->object_memory);
	return obj;
 }
 EXPORT_SYMBOL(drm_gem_object_alloc);
 
 /**
- * Removes the mapping from handle to filp for this object.
- */
-static int
-drm_gem_handle_delete(struct drm_file *filp, int handle)
-{
-	struct drm_device *dev;
-	struct drm_gem_object *obj;
-
-	/* This is gross. The idr system doesn't let us try a delete and
-	 * return an error code.  It just spews if you fail at deleting.
-	 * So, we have to grab a lock around finding the object and then
-	 * doing the delete on it and dropping the refcount, or the user
-	 * could race us to double-decrement the refcount and cause a
-	 * use-after-free later.  Given the frequency of our handle lookups,
-	 * we may want to use ida for number allocation and a hash table
-	 * for the pointers, anyway.
-	 */
-	spin_lock(&filp->table_lock);
-
-	/* Check if we currently have a reference on the object */
-	obj = idr_find(&filp->object_idr, handle);
-	if (obj == NULL) {
-		spin_unlock(&filp->table_lock);
-		return -EINVAL;
-	}
-	dev = obj->dev;
-
-	/* Release reference and decrement refcount. */
-	idr_remove(&filp->object_idr, handle);
-	spin_unlock(&filp->table_lock);
-
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-/**
  * Create a handle for this object. This adds a handle reference
  * to the object, which includes a regular reference count. Callers
  * will likely want to dereference the object afterwards.
  */
-static int
+int
 drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      int *handlep)
@@ -191,6 +149,7 @@ again:
	drm_gem_object_handle_reference(obj);
	return 0;
 }
+EXPORT_SYMBOL(drm_gem_handle_create);
 
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
@@ -217,334 +176,6 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
 EXPORT_SYMBOL(drm_gem_object_lookup);
 
 /**
- * Creates a new mm object and returns a handle to it.
- */
-int
-drm_gem_create_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	struct drm_gem_create *args = data;
-	struct drm_gem_object *obj;
-	int handle, ret;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	args->size = roundup(args->size, PAGE_SIZE);
-
-	/* Allocate the new object */
-	obj = drm_gem_object_alloc(dev, args->size);
-	if (obj == NULL)
-		return -ENOMEM;
-
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (ret)
-		return ret;
-
-	args->handle = handle;
-
-	return 0;
-}
-
-/**
- * Releases the handle to an mm object.
- */
-int
-drm_gem_close_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
-{
-	struct drm_gem_close *args = data;
-	int ret;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	ret = drm_gem_handle_delete(file_priv, args->handle);
-
-	return ret;
-}
-
-/**
- * Reads data from the object referenced by handle.
- *
- * On error, the contents of *data are undefined.
- */
-int
-drm_gem_pread_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
-{
-	struct drm_gem_pread *args = data;
-	struct drm_gem_object *obj;
-	ssize_t read;
-	loff_t offset;
-	int ret;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (obj == NULL)
-		return -EINVAL;
-
-	mutex_lock(&dev->struct_mutex);
-	if (dev->driver->gem_set_domain) {
-		ret = dev->driver->gem_set_domain(obj, file_priv,
-						  DRM_GEM_DOMAIN_CPU,
-						  0);
-		if (ret) {
-			drm_gem_object_unreference(obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	}
-	offset = args->offset;
-
-	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
-			args->size, &offset);
-	if (read != args->size) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		if (read < 0)
-			return read;
-		else
-			return -EINVAL;
-	}
-
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-/**
- * Maps the contents of an object, returning the address it is mapped
- * into.
- *
- * While the mapping holds a reference on the contents of the object, it doesn't
- * imply a ref on the object itself.
- */
-int
-drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
-{
-	struct drm_gem_mmap *args = data;
-	struct drm_gem_object *obj;
-	loff_t offset;
-	unsigned long addr;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (obj == NULL)
-		return -EINVAL;
-
-	offset = args->offset;
-
-	down_write(&current->mm->mmap_sem);
-	addr = do_mmap(obj->filp, 0, args->size,
-		       PROT_READ | PROT_WRITE, MAP_SHARED,
-		       args->offset);
-	up_write(&current->mm->mmap_sem);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-	if (IS_ERR((void *)addr))
-		return addr;
-
-	args->addr_ptr = (uint64_t) addr;
-
-	return 0;
-}
-
-/**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
-int
-drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	struct drm_gem_pwrite *args = data;
-	struct drm_gem_object *obj;
-	ssize_t written;
-	loff_t offset;
-	int ret;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (obj == NULL)
-		return -EINVAL;
-
-	mutex_lock(&dev->struct_mutex);
-	if (dev->driver->gem_set_domain) {
-		ret = dev->driver->gem_set_domain(obj, file_priv,
-						  DRM_GEM_DOMAIN_CPU,
-						  DRM_GEM_DOMAIN_CPU);
-		if (ret) {
-			drm_gem_object_unreference(obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	}
-	offset = args->offset;
-
-	written = vfs_write(obj->filp,
-			    (char __user *)(uintptr_t) args->data_ptr,
-			    args->size, &offset);
-
-	if (written != args->size) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		if (written < 0)
-			return written;
-		else
-			return -EINVAL;
-	}
-
-	if (dev->driver->gem_flush_pwrite)
-		dev->driver->gem_flush_pwrite(obj,
-					      args->offset,
-					      args->size);
-
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-/**
- * Create a global name for an object, returning the name.
- *
- * Note that the name does not hold a reference; when the object
- * is freed, the name goes away.
- */
-int
-drm_gem_flink_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
-{
-	struct drm_gem_flink *args = data;
-	struct drm_gem_object *obj;
-	int ret;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (obj == NULL)
-		return -EINVAL;
-
-again:
-	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
-		return -ENOMEM;
-
-	spin_lock(&dev->object_name_lock);
-	if (obj->name) {
-		spin_unlock(&dev->object_name_lock);
-		return -EEXIST;
-	}
-	ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-				&obj->name);
-	spin_unlock(&dev->object_name_lock);
-	if (ret == -EAGAIN)
-		goto again;
-
-	if (ret != 0) {
-		mutex_lock(&dev->struct_mutex);
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-
-	/*
-	 * Leave the reference from the lookup around as the
-	 * name table now holds one
-	 */
-	args->name = (uint64_t) obj->name;
-
-	return 0;
-}
-
-/**
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
-int
-drm_gem_open_ioctl(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
-{
-	struct drm_gem_open *args = data;
-	struct drm_gem_object *obj;
-	int ret;
-	int handle;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	spin_lock(&dev->object_name_lock);
-	obj = idr_find(&dev->object_name_idr, (int) args->name);
-	if (obj)
-		drm_gem_object_reference(obj);
-	spin_unlock(&dev->object_name_lock);
-	if (!obj)
-		return -ENOENT;
-
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	args->handle = handle;
-	args->size = obj->size;
-
-	return 0;
-}
-
-/**
- * Called when user space prepares to use an object
- */
-int
-drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_gem_set_domain *args = data;
-	struct drm_gem_object *obj;
-	int ret;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (obj == NULL)
-		return -EINVAL;
-
-	mutex_lock(&dev->struct_mutex);
-	if (dev->driver->gem_set_domain) {
-		ret = dev->driver->gem_set_domain(obj, file_priv,
-						  args->read_domains,
-						  args->write_domain);
-	} else {
-		obj->read_domains = args->read_domains;
-		obj->write_domain = args->write_domain;
-		ret = 0;
-	}
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
-}
-
-/**
  * Called at device open time, sets up the structure for handling refcounting
  * of mm objects.
  */
@@ -603,6 +234,7 @@ drm_gem_object_free(struct kref *kref)
 
	fput(obj->filp);
	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
	kfree(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free);
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index c6024d95..0dfbe57a 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
	    p->devnum != PCI_SLOT(dev->pdev->devfn) ||
	    p->funcnum != PCI_FUNC(dev->pdev->devfn))
		return -EINVAL;
 
-	p->irq = dev->irq;
+	p->irq = dev->pdev->irq;
 
	DRM_DEBUG("%d:%d:%d => IRQ %d\n",
		  p->busnum, p->devnum, p->funcnum, p->irq);
@@ -285,7 +285,7 @@ int drm_irq_install(struct drm_device * dev)
	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;
 
-	if (dev->irq == 0)
+	if (dev->pdev->irq == 0)
		return -EINVAL;
 
	mutex_lock(&dev->struct_mutex);
@@ -303,7 +303,7 @@ int drm_irq_install(struct drm_device * dev)
	dev->irq_enabled = 1;
	mutex_unlock(&dev->struct_mutex);
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
	/* Before installing handler */
	dev->driver->irq_preinstall(dev);
@@ -312,7 +312,7 @@ int drm_irq_install(struct drm_device * dev)
	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
		sh_flags = IRQF_SHARED;
 
-	ret = request_irq(dev->irq, dev->driver->irq_handler,
+	ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
			  sh_flags, dev->devname, dev);
	if (ret < 0) {
		mutex_lock(&dev->struct_mutex);
@@ -320,6 +320,10 @@ int drm_irq_install(struct drm_device * dev)
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
+	/* Expose the device irq to device drivers that want to export it for
+	 * whatever reason.
+	 */
+	dev->irq = dev->pdev->irq;
 
	/* After installing handler */
	ret = dev->driver->irq_postinstall(dev);
@@ -355,11 +359,11 @@ int drm_irq_uninstall(struct drm_device * dev)
	if (!irq_enabled)
		return -EINVAL;
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
	dev->driver->irq_uninstall(dev);
 
-	free_irq(dev->irq, dev);
+	free_irq(dev->pdev->irq, dev);
 
	drm_vblank_cleanup(dev);
 
@@ -397,7 +401,7 @@ int drm_control(struct drm_device *dev, void *data,
		if (drm_core_check_feature(dev, DRIVER_MODESET))
			return 0;
		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-		    ctl->irq != dev->irq)
+		    ctl->irq != dev->pdev->irq)
			return -EINVAL;
		return drm_irq_install(dev);
	case DRM_UNINST_HANDLER:
@@ -580,7 +584,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
	int ret = 0;
	unsigned int flags, seq, crtc;
 
-	if ((!dev->irq) || (!dev->irq_enabled))
+	if ((!dev->pdev->irq) || (!dev->irq_enabled))
		return -EINVAL;
 
	if (vblwait->request.type &
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index d2fb1feb..e8bdad92 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -384,65 +384,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
 }
 EXPORT_SYMBOL(drm_idlelock_release);
 
-/**
- * Takes the lock on behalf of the client if needed, using the kernel context.
- *
- * This allows us to hide the hardware lock when it's required for protection
- * of data structures (such as command ringbuffer) shared with the X Server, and
- * a way for us to transition to lockless for those requests when the X Server
- * stops accessing the ringbuffer directly, without having to update the
- * other userland clients.
- */
-int drm_client_lock_take(struct drm_device *dev, struct drm_file *file_priv)
-{
-	struct drm_master *master = file_priv->master;
-	int ret;
-	unsigned long irqflags;
-
-	/* If the client has the lock, we're already done. */
-	if (drm_i_have_hw_lock(dev, file_priv))
-		return 0;
-
-	mutex_unlock (&dev->struct_mutex);
-	/* Client doesn't hold the lock.  Block taking the lock with the kernel
-	 * context on behalf of the client, and return whether we were
-	 * successful.
-	 */
-	spin_lock_irqsave(&master->lock.spinlock, irqflags);
-	master->lock.user_waiters++;
-	spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
-	ret = wait_event_interruptible(master->lock.lock_queue,
-				       drm_lock_take(&master->lock,
-						     DRM_KERNEL_CONTEXT));
-	spin_lock_irqsave(&master->lock.spinlock, irqflags);
-	master->lock.user_waiters--;
-	if (ret != 0) {
-		spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
-	} else {
-		master->lock.file_priv = file_priv;
-		master->lock.lock_time = jiffies;
-		master->lock.kernel_held = 1;
-		file_priv->lock_count++;
-		spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
-	}
-	mutex_lock (&dev->struct_mutex);
-	return ret;
-}
-EXPORT_SYMBOL(drm_client_lock_take);
-
-void drm_client_lock_release(struct drm_device *dev, struct drm_file *file_priv)
-{
-	struct drm_master *master = file_priv->master;
-
-	if (master->lock.kernel_held) {
-		master->lock.kernel_held = 0;
-		master->lock.file_priv = NULL;
-		drm_lock_free(&master->lock, DRM_KERNEL_CONTEXT);
-	}
-}
-EXPORT_SYMBOL(drm_client_lock_release);
-
 int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 {
	struct drm_master *master = file_priv->master;
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 690e081c..127a7987 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -658,7 +658,12 @@ static int drm_gem_object_info(char *buf, char **start, off_t offset,
	*start = &buf[offset];
	*eof = 0;
 
-	DRM_PROC_PRINT ("%d objects\n", atomic_read (&dev->object_count));
+	DRM_PROC_PRINT("%d objects\n", atomic_read (&dev->object_count));
+	DRM_PROC_PRINT("%d object bytes\n", atomic_read (&dev->object_memory));
+	DRM_PROC_PRINT("%d pinned\n", atomic_read (&dev->pin_count));
+	DRM_PROC_PRINT("%d pin bytes\n", atomic_read (&dev->pin_memory));
+	DRM_PROC_PRINT("%d gtt bytes\n", atomic_read (&dev->gtt_memory));
+	DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
	if (len > request + offset)
		return request;
	*eof = 1;
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 574282b2..1817c964 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -569,6 +569,8 @@ static int i915_resume(struct drm_device *dev)
 }
 
 static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void remove(struct pci_dev *pdev);
+
 static struct drm_driver driver = {
	/* don't use mtrr's here, the Xserver or user space app should
	 * deal with them for intel hardware.
@@ -579,8 +581,10 @@ static struct drm_driver driver = {
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.firstopen = i915_driver_firstopen,
+	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
+	.postclose = i915_driver_postclose,
	.suspend = i915_suspend,
	.resume = i915_resume,
	.device_is_agp = i915_driver_device_is_agp,
@@ -599,8 +603,6 @@ static struct drm_driver driver = {
	.ioctls = i915_ioctls,
	.gem_init_object = i915_gem_init_object,
	.gem_free_object = i915_gem_free_object,
-	.gem_set_domain = i915_gem_set_domain,
-	.gem_flush_pwrite = i915_gem_flush_pwrite,
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
@@ -617,7 +619,7 @@ static struct drm_driver driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
		.probe = probe,
-		.remove = __devexit_p(drm_cleanup_pci),
+		.remove = remove,
	},
 #if defined(I915_HAVE_FENCE) && defined(I915_TTM)
	.fence_driver = &i915_fence_driver,
@@ -635,7 +637,28 @@ static struct drm_driver driver = {
 
 static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	return drm_get_dev(pdev, ent, &driver);
+	int ret;
+
+	/* On the 945G/GM, the chipset reports the MSI capability on the
+	 * integrated graphics even though the support isn't actually there
+	 * according to the published specs.  It doesn't appear to function
+	 * correctly in testing on 945G.
+	 * This may be a side effect of MSI having been made available for PEG
+	 * and the registers being closely associated.
+	 */
+	if (pdev->device != 0x2772 && pdev->device != 0x27A2)
+		(void) pci_enable_msi(pdev);
+
+	ret = drm_get_dev(pdev, ent, &driver);
+	if (ret && pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	return ret;
+}
+
+static void remove(struct pci_dev *pdev)
+{
+	if (pdev->msi_enabled)
+		pci_disable_msi(pdev);
+	drm_cleanup_pci(pdev);
 }
 
 static int __init i915_init(void)
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index 47745010..787251f5 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -35,11 +35,17 @@
 #define WATCH_EXEC	0
 #define WATCH_LRU	0
 #define WATCH_RELOC	0
+#define WATCH_INACTIVE	0
 
 static int
 i915_gem_object_set_domain(struct drm_gem_object *obj,
			    uint32_t read_domains,
			    uint32_t write_domain);
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+		    struct drm_file *file_priv,
+		    uint32_t read_domains,
+		    uint32_t write_domain);
 
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end)
@@ -55,6 +61,8 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
	drm_memrange_init(&dev_priv->mm.gtt_space, start,
			  end - start);
 
+	dev->gtt_total = (uint32_t) (end - start);
+
	return 0;
 }
 
@@ -72,6 +80,199 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
	return ret;
 }
 
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_i915_gem_create *args = data;
+	struct drm_gem_object *obj;
+	int handle, ret;
+
+	args->size = roundup(args->size, PAGE_SIZE);
+
+	/* Allocate the new object */
+	obj = drm_gem_object_alloc(dev, args->size);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+
+	return 0;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pread *args = data;
+	struct drm_gem_object *obj;
+	ssize_t read;
+	loff_t offset;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_set_domain(obj, file_priv,
+				  I915_GEM_DOMAIN_CPU, 0);
+	if (ret) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+	offset = args->offset;
+
+	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+			args->size, &offset);
+	if (read != args->size) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		if (read < 0)
+			return read;
+		else
+			return -EINVAL;
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pwrite *args = data;
+	struct drm_gem_object *obj;
+	ssize_t written;
+	loff_t offset;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_set_domain(obj, file_priv,
+				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	if (ret) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+	offset = args->offset;
+
+	written = vfs_write(obj->filp,
+			    (char __user *)(uintptr_t) args->data_ptr,
+			    args->size, &offset);
+
+	if (written != args->size) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		if (written < 0)
+			return written;
+		else
+			return -EINVAL;
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Called when user space prepares to use an object
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_i915_gem_set_domain *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_set_domain(obj, file_priv,
+				  args->read_domains, args->write_domain);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_mmap *args = data;
+	struct drm_gem_object *obj;
+	loff_t offset;
+	unsigned long addr;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EINVAL;
+
+	offset = args->offset;
+
+	down_write(&current->mm->mmap_sem);
+	addr = do_mmap(obj->filp, 0, args->size,
+		       PROT_READ | PROT_WRITE, MAP_SHARED,
+		       args->offset);
+	up_write(&current->mm->mmap_sem);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR((void *)addr))
+		return addr;
+
+	args->addr_ptr = (uint64_t) addr;
+
+	return 0;
+}
+
 static void
 i915_gem_object_free_page_list(struct drm_gem_object *obj)
 {
@@ -110,6 +311,26 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
		       &dev_priv->mm.active_list);
 }
 
+#if WATCH_INACTIVE
+static void
+i915_verify_inactive(struct drm_device *dev, char *file, int line)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		obj = obj_priv->obj;
+		if (obj_priv->pin_count || obj_priv->active || (obj->write_domain & ~I915_GEM_DOMAIN_CPU))
+			DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
+				  obj,
+				  obj_priv->pin_count, obj_priv->active, obj->write_domain, file, line);
+	}
+}
+#else
+#define i915_verify_inactive(dev,file,line)
+#endif
+
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
@@ -117,6 +338,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
+	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
		list_del_init(&obj_priv->list);
	else
@@ -126,6 +348,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
		obj_priv->active = 0;
		drm_gem_object_unreference(obj);
	}
+	i915_verify_inactive(dev, __FILE__, __LINE__);
 }
 
 /**
@@ -142,6 +365,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	uint32_t seqno;
+	int was_empty;
	RING_LOCALS;
 
	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
@@ -169,11 +393,11 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
	request->seqno = seqno;
	request->emitted_jiffies = jiffies;
	request->flush_domains = flush_domains;
-	if (list_empty(&dev_priv->mm.request_list))
-		mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
-
+	was_empty = list_empty(&dev_priv->mm.request_list);
	list_add_tail(&request->list, &dev_priv->mm.request_list);
 
+	if (was_empty)
+		schedule_delayed_work (&dev_priv->mm.retire_work, HZ);
	return seqno;
 }
 
@@ -194,7 +418,7 @@ i915_retire_commands(struct drm_device *dev)
 
	/* The sampler always gets flushed on i965 (sigh) */
	if (IS_I965G(dev))
-		flush_domains |= DRM_GEM_DOMAIN_I915_SAMPLER;
+		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
	BEGIN_LP_RING(2);
	OUT_RING(cmd);
	OUT_RING(0); /* noop */
@@ -306,33 +530,24 @@ i915_gem_retire_requests(struct drm_device *dev)
			list_del(&request->list);
			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
		} else
-		    break;
+			break;
	}
 }
 
 void
-i915_gem_retire_timeout(unsigned long data)
-{
-	struct drm_device *dev = (struct drm_device *) data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	schedule_work(&dev_priv->mm.retire_task);
-}
-
-void
-i915_gem_retire_handler(struct work_struct *work)
+i915_gem_retire_work_handler(struct work_struct *work)
 {
	struct drm_i915_private *dev_priv;
	struct drm_device *dev;
 
	dev_priv = container_of(work, struct drm_i915_private,
-				mm.retire_task);
+				mm.retire_work.work);
	dev = dev_priv->dev;
 
	mutex_lock(&dev->struct_mutex);
	i915_gem_retire_requests(dev);
	if (!list_empty(&dev_priv->mm.request_list))
-		mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
+		schedule_delayed_work (&dev_priv->mm.retire_work, HZ);
	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -356,8 +571,8 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
		i915_user_irq_off(dev);
	}
	if (ret)
-		DRM_ERROR ("%s returns %d (awaiting %d at %d)\n",
-			   __func__, ret, seqno, i915_get_gem_seqno(dev));
+		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+			  __func__, ret, seqno, i915_get_gem_seqno(dev));
 
	/* Directly dispatch request retiring.  While we have the work queue
	 * to handle this, the waiter on a request often wants an associated
@@ -384,51 +599,51 @@ i915_gem_flush(struct drm_device *dev,
		  invalidate_domains, flush_domains);
 #endif
 
-	if (flush_domains & DRM_GEM_DOMAIN_CPU)
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
		drm_agp_chipset_flush(dev);
 
-	if ((invalidate_domains|flush_domains) & ~DRM_GEM_DOMAIN_CPU) {
+	if ((invalidate_domains|flush_domains) & ~I915_GEM_DOMAIN_CPU) {
		/*
		 * read/write caches:
		 *
-		 * DRM_GEM_DOMAIN_I915_RENDER is always invalidated, but is
+		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
-		 * DRM_GEM_DOMAIN_I915_SAMPLER is flushed on pre-965 if
+		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
-		 * DRM_GEM_DOMAIN_I915_COMMAND may not exist?
+		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
-		 * DRM_GEM_DOMAIN_I915_INSTRUCTION, which exists on 965, is
+		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
-		 * DRM_GEM_DOMAIN_I915_VERTEX, which exists on 965, is
+		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
-		 * On 965, TLBs associated with DRM_GEM_DOMAIN_I915_COMMAND
-		 * and DRM_GEM_DOMAIN_CPU in are invalidated at PTE write and
-		 * DRM_GEM_DOMAIN_I915_RENDER and DRM_GEM_DOMAIN_I915_SAMPLER
+		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+		 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */
 
		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains|flush_domains) &
-		    DRM_GEM_DOMAIN_I915_RENDER)
+		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
-			if (invalidate_domains & DRM_GEM_DOMAIN_I915_SAMPLER)
+			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
-		if (invalidate_domains & DRM_GEM_DOMAIN_I915_INSTRUCTION)
+		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;
 
 #if WATCH_EXEC
@@ -455,7 +670,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 
	/* If there are writes queued to the buffer, flush and
	 * create a new seqno to wait for.
	 */
-	if (obj->write_domain & ~(DRM_GEM_DOMAIN_CPU)) {
+	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU)) {
		uint32_t write_domain = obj->write_domain;
 #if WATCH_BUF
		DRM_INFO("%s: flushing object %p from write domain %08x\n",
@@ -494,6 +709,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 static int
 i915_gem_object_unbind(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret = 0;
 
@@ -504,16 +720,31 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
	if (obj_priv->gtt_space == NULL)
		return 0;
 
+	if (obj_priv->pin_count != 0) {
+		DRM_ERROR("Attempting to unbind pinned buffer\n");
+		return -EINVAL;
+	}
+
+	/* Wait for any rendering to complete
+	 */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret) {
+		DRM_ERROR ("wait_rendering failed: %d\n", ret);
+		return ret;
+	}
+
	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
	 * also ensure that all pending GPU writes are finished
	 * before we unbind.
	 */
-	ret = i915_gem_object_set_domain (obj, DRM_GEM_DOMAIN_CPU,
-					  DRM_GEM_DOMAIN_CPU);
-	if (ret)
+	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
+					 I915_GEM_DOMAIN_CPU);
+	if (ret) {
+		DRM_ERROR("set_domain failed: %d\n", ret);
		return ret;
+	}
 
	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
@@ -521,21 +752,20 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
		obj_priv->agp_mem = NULL;
	}
 
+	BUG_ON(obj_priv->active);
+
	i915_gem_object_free_page_list(obj);
 
+	atomic_dec(&dev->gtt_count);
+	atomic_sub(obj->size, &dev->gtt_memory);
+
	drm_memrange_put_block(obj_priv->gtt_space);
	obj_priv->gtt_space = NULL;
 
	/* Remove ourselves from the LRU list if present. */
-	if (!list_empty(&obj_priv->list)) {
+	if (!list_empty(&obj_priv->list))
		list_del_init(&obj_priv->list);
-		if (obj_priv->active) {
-			DRM_ERROR("Failed to wait on buffer when unbinding, "
-				  "continued anyway.\n");
-			obj_priv->active = 0;
-			drm_gem_object_unreference(obj);
-		}
-	}
+
	return 0;
 }
 
@@ -622,7 +852,7 @@ i915_gem_evict_something(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
-	int ret;
+	int ret = 0;
 
	for (;;) {
		/* If there's an inactive buffer available now, grab it
@@ -634,6 +864,13 @@ i915_gem_evict_something(struct drm_device *dev)
					    list);
			obj = obj_priv->obj;
			BUG_ON(obj_priv->pin_count != 0);
+#if WATCH_LRU
+			DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+			BUG_ON(obj_priv->active);
+
+			/* Wait on the rendering and unbind the buffer. */
+			ret = i915_gem_object_unbind(obj);
			break;
		}
 
@@ -643,17 +880,21 @@ i915_gem_evict_something(struct drm_device *dev)
		 */
		if (!list_empty(&dev_priv->mm.request_list)) {
			struct drm_i915_gem_request *request;
-			int ret;
 
			request = list_first_entry(&dev_priv->mm.request_list,
						   struct drm_i915_gem_request,
						   list);
 
			ret = i915_wait_request(dev, request->seqno);
 
-			if (ret != 0)
-				return ret;
-			continue;
+			/* if waiting caused an object to become inactive,
+			 * then loop around and wait for it.  Otherwise, we
+			 * assume that waiting freed and unbound something,
+			 * so there should now be some space in the GTT
+			 */
+			if (!list_empty(&dev_priv->mm.inactive_list))
+				continue;
+			break;
		}
 
		/* If we didn't have anything on the request list but there
@@ -676,21 +917,15 @@ i915_gem_evict_something(struct drm_device *dev)
			continue;
		}
 
+		DRM_ERROR("inactive empty %d request empty %d flushing empty %d\n",
+			  list_empty(&dev_priv->mm.inactive_list),
+			  list_empty(&dev_priv->mm.request_list),
+			  list_empty(&dev_priv->mm.flushing_list));
		/* If we didn't do any of the above, there's nothing to be done
		 * and we just can't fit it in.
		 */
		return -ENOMEM;
	}
-
-#if WATCH_LRU
-	DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-
-	BUG_ON(obj_priv->active);
-
-	/* Wait on the rendering and unbind the buffer. */
-	ret = i915_gem_object_unbind(obj);
-
	return ret;
 }
 
@@ -776,7 +1011,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
		ret = i915_gem_evict_something(dev);
		if (ret != 0) {
-			DRM_ERROR("Failed to evict a buffer\n");
+			DRM_ERROR("Failed to evict a buffer %d\n", ret);
			return ret;
		}
		goto search_free;
@@ -807,13 +1042,15 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
		obj_priv->gtt_space = NULL;
		return -ENOMEM;
	}
+	atomic_inc(&dev->gtt_count);
+	atomic_add(obj->size, &dev->gtt_memory);
 
	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
-	BUG_ON(obj->read_domains & ~DRM_GEM_DOMAIN_CPU);
-	BUG_ON(obj->write_domain & ~DRM_GEM_DOMAIN_CPU);
+	BUG_ON(obj->read_domains & ~I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
 
	return 0;
 }
@@ -980,7 +1217,7 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
	 * stale data. That is, any new read domains.
	 */
	invalidate_domains |= read_domains & ~obj->read_domains;
-	if ((flush_domains | invalidate_domains) & DRM_GEM_DOMAIN_CPU) {
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
			 __func__, flush_domains, invalidate_domains);
@@ -990,8 +1227,8 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
	 * then pause for rendering so that the GPU caches will be
	 * flushed before the cpu cache is invalidated
	 */
-	if ((invalidate_domains & DRM_GEM_DOMAIN_CPU) &&
-	    (flush_domains & ~DRM_GEM_DOMAIN_CPU)) {
+	if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
+	    (flush_domains & ~I915_GEM_DOMAIN_CPU)) {
		ret = i915_gem_object_wait_rendering(obj);
		if (ret)
			return ret;
@@ -1051,7 +1288,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
	int bad_count = 0;
 
	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
-		 __FUNCTION__, obj, obj_priv->gtt_offset, handle,
+		 __func__, obj, obj_priv->gtt_offset, handle,
		 obj->size / 1024);
 
	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
@@ -1110,29 +1347,25 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
 #endif
 
 /**
- * Bind an object to the GTT and evaluate the relocations landing in it
- *
- *
+ * Pin an object to the GTT and evaluate the relocations landing in it.
  */
 static int
-i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
-				  struct drm_file *file_priv,
-				  struct drm_i915_gem_exec_object *entry)
+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+				 struct drm_file *file_priv,
+				 struct drm_i915_gem_exec_object *entry)
 {
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_relocation_entry __user *relocs;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-	int i;
+	int i, ret;
	uint32_t last_reloc_offset = -1;
	void *reloc_page = NULL;
 
	/* Choose the GTT offset for our buffer and put it there. */
-	if (obj_priv->gtt_space == NULL) {
-		i915_gem_object_bind_to_gtt(obj, (unsigned) entry->alignment);
-		if (obj_priv->gtt_space == NULL)
-			return -ENOMEM;
-	}
+	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+	if (ret)
+		return ret;
 
	entry->offset = obj_priv->gtt_offset;
 
@@ -1148,13 +1381,17 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
		int ret;
 
		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
-		if (ret != 0)
+		if (ret != 0) {
+			i915_gem_object_unpin(obj);
			return ret;
+		}
 
		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
						   reloc.target_handle);
-		if (target_obj == NULL)
+		if (target_obj == NULL) {
+			i915_gem_object_unpin(obj);
			return -EINVAL;
+		}
		target_obj_priv = target_obj->driver_private;
 
		/* The target buffer should have appeared before us in the
@@ -1164,6 +1401,7 @@
			DRM_ERROR("No GTT space found for object %d\n",
				  reloc.target_handle);
			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
 
@@ -1173,6 +1411,7 @@
				  obj, reloc.target_handle,
				  (int) reloc.offset, (int) obj->size);
			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
		if (reloc.offset & 3) {
@@ -1181,6 +1420,7 @@
				  obj, reloc.target_handle,
				  (int) reloc.offset);
			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
 
@@ -1194,6 +1434,7 @@
				  reloc.write_domain,
				  target_obj->pending_write_domain);
			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
			return -EINVAL;
		}
 
@@ -1232,7 +1473,7 @@
		/* As we're writing through the gtt, flush
		 * any CPU writes before we write the relocations
		 */
-		if (obj->write_domain & DRM_GEM_DOMAIN_CPU) {
+		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			drm_agp_chipset_flush(dev);
			obj->write_domain = 0;
@@ -1254,6 +1495,7 @@
			last_reloc_offset = reloc_offset;
			if (reloc_page == NULL) {
				drm_gem_object_unreference(target_obj);
+				i915_gem_object_unpin(obj);
				return -ENOMEM;
			}
		}
@@ -1276,6 +1518,7 @@
		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
		if (ret != 0) {
			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
			return ret;
		}
 
@@ -1362,32 +1605,17 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
 * relatively low latency when blocking on a particular request to finish.
 */
 static int
-i915_gem_ring_throttle(struct drm_device *dev)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	int ret = 0;
+	uint32_t seqno;
 
	mutex_lock(&dev->struct_mutex);
-	while (!list_empty(&dev_priv->mm.request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_first_entry(&dev_priv->mm.request_list,
-					   struct drm_i915_gem_request,
-					   list);
-
-		/* Break out if we're close enough. */
-		if ((long) (jiffies - request->emitted_jiffies) <= (20 * HZ) / 1000) {
-			mutex_unlock(&dev->struct_mutex);
-			return 0;
-		}
-
-		/* Wait on the last request if not. */
-		ret = i915_wait_request(dev, request->seqno);
-		if (ret != 0) {
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	}
+	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
+	i915_file_priv->mm.last_gem_throttle_seqno = i915_file_priv->mm.last_gem_seqno;
+	if (seqno)
+		ret = i915_wait_request(dev, seqno);
	mutex_unlock(&dev->struct_mutex);
	return ret;
 }
@@ -1397,21 +1625,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
-	int ret, i;
+	int ret, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains;
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
-
 #if WATCH_EXEC
	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
 #endif
-	i915_kernel_lost_context(dev);
 
	/* Copy in the exec list from userland */
	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
@@ -1437,6 +1663,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
	mutex_lock(&dev->struct_mutex);
 
+	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (dev_priv->mm.suspended) {
		DRM_ERROR("Execbuf while VT-switched.\n");
		mutex_unlock(&dev->struct_mutex);
@@ -1463,20 +1690,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
		object_list[i]->pending_read_domains = 0;
		object_list[i]->pending_write_domain = 0;
-		ret = i915_gem_object_bind_and_relocate(object_list[i],
-							file_priv,
-							&exec_list[i]);
+		ret = i915_gem_object_pin_and_relocate(object_list[i],
+						       file_priv,
+						       &exec_list[i]);
		if (ret) {
			DRM_ERROR("object bind and relocate failed %d\n", ret);
			goto err;
		}
+		pinned = i + 1;
	}
 
	/* Set the pending read domains for the batch buffer to COMMAND */
	batch_obj = object_list[args->buffer_count-1];
-	batch_obj->pending_read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
+	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
	batch_obj->pending_write_domain = 0;
 
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1499,9 +1729,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
		goto err;
	}
 
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
	/* Flush/invalidate caches and chipset buffer */
	flush_domains = i915_gem_dev_set_domain(dev);
 
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
 #if WATCH_COHERENCY
	for (i = 0; i < args->buffer_count; i++) {
		i915_gem_object_check_coherency(object_list[i],
@@ -1531,6 +1765,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
	 */
	flush_domains |= i915_retire_commands(dev);
 
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
	/*
	 * Get a seqno representing the execution of the current buffer,
	 * which we can wait on.  We would like to mitigate these interrupts,
@@ -1540,6 +1776,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
	 */
	seqno = i915_add_request(dev, flush_domains);
	BUG_ON(seqno == 0);
+	i915_file_priv->mm.last_gem_seqno = seqno;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];
		struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1554,6 +1791,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
	i915_dump_lru(dev, __func__);
 #endif
 
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
	/* Copy the new buffer offsets back to the user's exec list. */
	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
			   (uintptr_t) args->buffers_ptr,
@@ -1565,6 +1804,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
			  args->buffer_count, ret);
 err:
	if (object_list != NULL) {
+		for (i = 0; i < pinned; i++)
+			i915_gem_object_unpin(object_list[i]);
+
		for (i = 0; i < args->buffer_count; i++)
			drm_gem_object_unreference(object_list[i]);
	}
@@ -1586,28 +1828,55 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int ret;
 
+	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->gtt_space == NULL) {
		ret = i915_gem_object_bind_to_gtt(obj, alignment);
		if (ret != 0) {
-			DRM_ERROR("Failure to bind in "
-				  "i915_gem_pin_ioctl(): %d\n",
-				  ret);
-			drm_gem_object_unreference(obj);
-			mutex_unlock(&dev->struct_mutex);
+			DRM_ERROR("Failure to bind: %d", ret);
			return ret;
		}
	}
+
	obj_priv->pin_count++;
+
+	/* If the object is not active and not pending a flush,
+	 * remove it from the inactive list
+	 */
+	if (obj_priv->pin_count == 1) {
+		atomic_inc(&dev->pin_count);
+		atomic_add(obj->size, &dev->pin_memory);
+		if (!obj_priv->active && (obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0 &&
+		    !list_empty(&obj_priv->list))
+			list_del_init(&obj_priv->list);
+	}
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
	return 0;
 }
 
 void
 i915_gem_object_unpin(struct drm_gem_object *obj)
 {
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
+	i915_verify_inactive(dev, __FILE__, __LINE__);
	obj_priv->pin_count--;
+	BUG_ON(obj_priv->pin_count < 0);
+	BUG_ON(obj_priv->gtt_space == NULL);
+
+	/* If the object is no longer pinned, and is
+	 * neither active nor being flushed, then stick it on
+	 * the inactive list
+	 */
+	if (obj_priv->pin_count == 0) {
+		if (!obj_priv->active && (obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0)
+			list_move_tail(&obj_priv->list,
+				       &dev_priv->mm.inactive_list);
+		atomic_dec(&dev->pin_count);
+		atomic_sub(obj->size, &dev->pin_memory);
+	}
+	i915_verify_inactive(dev, __FILE__, __LINE__);
 }
 
 int
@@ -1621,7 +1890,6 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 
	mutex_lock(&dev->struct_mutex);
 
-	i915_kernel_lost_context(dev);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
@@ -1654,7 +1922,6 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 
	mutex_lock(&dev->struct_mutex);
 
-	i915_kernel_lost_context(dev);
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
@@ -1689,7 +1956,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
	obj_priv = obj->driver_private;
	args->busy = obj_priv->active;
-	
+
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return 0;
@@ -1699,7 +1966,7 @@ int
 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
 {
-	return i915_gem_ring_throttle(dev);
+	return i915_gem_ring_throttle(dev, file_priv);
 }
 
 int i915_gem_init_object(struct drm_gem_object *obj)
@@ -1710,6 +1977,15 @@ int i915_gem_init_object(struct drm_gem_object *obj)
	if (obj_priv == NULL)
		return -ENOMEM;
 
+	/*
+	 * We've just allocated pages from the kernel,
+	 * so they've just been written by the CPU with
+	 * zeros. They'll need to be clflushed before we
+	 * use them with the GPU.
+	 */
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
+
	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	INIT_LIST_HEAD(&obj_priv->list);
@@ -1718,7 +1994,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
-	i915_kernel_lost_context(obj->dev);
	i915_gem_object_unbind(obj);
 
	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -1735,41 +2010,11 @@ i915_gem_set_domain(struct drm_gem_object *obj,
 
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	drm_client_lock_take(dev, file_priv);
-	i915_kernel_lost_context(dev);
	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
-	if (ret) {
-		drm_client_lock_release(dev, file_priv);
+	if (ret)
		return ret;
-	}
	i915_gem_dev_set_domain(obj->dev);
-	drm_client_lock_release(dev, file_priv);
-
-	return 0;
-}
-
-int
-i915_gem_flush_pwrite(struct drm_gem_object *obj,
-		      uint64_t offset, uint64_t size)
-{
-#if 0
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-	/*
-	 * For writes much less than the size of the object and
-	 * which are already pinned in memory, do the flush right now
-	 */
-
-	if ((size < obj->size >> 1) && obj_priv->page_list != NULL) {
-		unsigned long first_page = offset / PAGE_SIZE;
-		unsigned long beyond_page = roundup(offset + size, PAGE_SIZE) / PAGE_SIZE;
-
-		drm_ttm_cache_flush(obj_priv->page_list + first_page,
-				    beyond_page - first_page);
-		drm_agp_chipset_flush(dev);
-		obj->write_domain = 0;
-	}
-#endif
	return 0;
 }
 
@@ -1817,8 +2062,10 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
	obj_priv = obj->driver_private;
 
	ret = i915_gem_object_pin(obj, 4096);
-	if (ret != 0)
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
		return ret;
+	}
 
	/* Set up the kernel mapping for the ring. */
	dev_priv->ring.Size = obj->size;
@@ -1886,6 +2133,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
		return ret;
 
	mutex_lock(&dev->struct_mutex);
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+	BUG_ON(!list_empty(&dev_priv->mm.request_list));
	dev_priv->mm.suspended = 0;
	mutex_unlock(&dev->struct_mutex);
	return 0;
@@ -1929,6 +2180,8 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
 {
	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t seqno;
+	int ret;
 
	mutex_lock(&dev->struct_mutex);
	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
@@ -1936,32 +2189,40 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
	 */
	dev_priv->mm.suspended = 1;
 
-	/* Move all buffers out of the GTT. */
-	i915_gem_evict_from_list(dev, &dev_priv->mm.active_list);
-	i915_gem_evict_from_list(dev, &dev_priv->mm.flushing_list);
-	i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+	i915_kernel_lost_context(dev);
 
-	/* Make sure the hardware's idle. */
-	while (!list_empty(&dev_priv->mm.request_list)) {
-		struct drm_i915_gem_request *request;
-		int ret;
+	/* Flush the GPU along with all non-CPU write domains
+	 */
+	i915_gem_flush(dev, ~I915_GEM_DOMAIN_CPU, ~I915_GEM_DOMAIN_CPU);
+	seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
+	if (seqno == 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+	ret = i915_wait_request(dev, seqno);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
 
-		request = list_first_entry(&dev_priv->mm.request_list,
-					   struct drm_i915_gem_request,
-					   list);
+	/* Active and flushing should now be empty as we've
+	 * waited for a sequence higher than any pending execbuffer
+	 */
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 
-		ret = i915_wait_request(dev, request->seqno);
-		if (ret != 0) {
-			DRM_ERROR("Error waiting for idle at LeaveVT: %d\n",
-				  ret);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	}
+	/* Request should now be empty as we've also waited
+	 * for the last request in the list
+	 */
+	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+	/* Move all buffers out of the GTT. */
+	i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
 
	BUG_ON(!list_empty(&dev_priv->mm.active_list));
	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+	BUG_ON(!list_empty(&dev_priv->mm.request_list));
 
	i915_gem_cleanup_ringbuffer(dev);
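
As a footnote to the i915_drv.c hunk above: the probe()/remove() pair it adds is a reusable PCI pattern — opportunistically enable MSI except on devices whose MSI reporting is known to be broken, and tear it down again on every exit path. A condensed, hypothetical version follows; example_pci_probe/example_pci_remove and the attach/detach stubs are illustrative, and only the 945G/GM device IDs 0x2772/0x27A2 and the pci_enable_msi()/pci_disable_msi() calls come from the patch:

    #include <linux/pci.h>

    /* Illustrative stubs for the driver-core calls (drm_get_dev() and
     * drm_cleanup_pci() in the real patch). */
    static int example_driver_attach(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
    {
            return 0;
    }
    static void example_driver_detach(struct pci_dev *pdev) { }

    /* Devices that advertise the MSI capability but are not trusted to
     * deliver it, per the 945G/GM quirk described in the patch. */
    static int msi_is_blacklisted(struct pci_dev *pdev)
    {
            return pdev->device == 0x2772 || pdev->device == 0x27A2;
    }

    static int example_pci_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
    {
            int ret;

            /* Best effort: if MSI can't be enabled, the device simply keeps
             * using legacy line interrupts, so the result is ignored. */
            if (!msi_is_blacklisted(pdev))
                    (void) pci_enable_msi(pdev);

            ret = example_driver_attach(pdev, ent);
            if (ret && pdev->msi_enabled)
                    pci_disable_msi(pdev);  /* undo on the error path */
            return ret;
    }

    static void example_pci_remove(struct pci_dev *pdev)
    {
            if (pdev->msi_enabled)
                    pci_disable_msi(pdev);
            example_driver_detach(pdev);
    }

The ordering matters: pci_enable_msi() rewrites pdev->irq to the MSI vector, which is presumably why the drm_irq.c hunks switch request_irq()/free_irq() over to dev->pdev->irq and why drm_irq_install() only re-publishes dev->irq = dev->pdev->irq after the handler is installed.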