 -rw-r--r--  libdrm/intel/intel_bufmgr_fake.c |   2
 -rw-r--r--  libdrm/intel/intel_bufmgr_gem.c  |  30
 -rw-r--r--  linux-core/drmP.h                |  41
 -rw-r--r--  linux-core/drm_drv.c             |   5
 -rw-r--r--  linux-core/drm_gem.c             | 231
 -rw-r--r--  linux-core/i915_drv.c            |   2
 -rw-r--r--  linux-core/i915_gem.c            | 281
 -rw-r--r--  shared-core/drm.h                |  83
 -rw-r--r--  shared-core/i915_dma.c           |   5
 -rw-r--r--  shared-core/i915_drm.h           | 101
 -rw-r--r--  shared-core/i915_drv.h           |  20
 -rw-r--r--  tests/gem_basic.c                |   9
 -rw-r--r--  tests/gem_mmap.c                 |  19
 -rw-r--r--  tests/gem_readwrite.c            |  13
 14 files changed, 389 insertions(+), 453 deletions(-)
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
index 3f5a22d3..06e85b2b 100644
--- a/libdrm/intel/intel_bufmgr_fake.c
+++ b/libdrm/intel/intel_bufmgr_fake.c
@@ -1062,7 +1062,7 @@ dri_fake_process_relocs(dri_bo *batch_buf)
dri_fake_calculate_domains(batch_buf);
- batch_fake->read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
+ batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
/* we've ran out of RAM so blow the whole lot away and retry */
restart:
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index a65ae982..e057d949 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -299,7 +299,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
}
if (!alloc_from_cache) {
- struct drm_gem_create create;
+ struct drm_i915_gem_create create;
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
@@ -309,7 +309,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
memset(&create, 0, sizeof(create));
create.size = bo_size;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
bo_gem->gem_handle = create.handle;
if (ret != 0) {
free(bo_gem);
@@ -455,7 +455,7 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
{
dri_bufmgr_gem *bufmgr_gem;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_set_domain set_domain;
+ struct drm_i915_gem_set_domain set_domain;
int ret;
bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
@@ -470,13 +470,13 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
if (bo_gem->virtual == NULL) {
- struct drm_gem_mmap mmap_arg;
+ struct drm_i915_gem_mmap mmap_arg;
memset(&mmap_arg, 0, sizeof(mmap_arg));
mmap_arg.handle = bo_gem->gem_handle;
mmap_arg.offset = 0;
mmap_arg.size = bo->size;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_MMAP, &mmap_arg);
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__,
@@ -491,9 +491,9 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
if (!bo_gem->cpu_domain_set) {
set_domain.handle = bo_gem->gem_handle;
- set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
- set_domain.write_domain = write_enable ? DRM_GEM_DOMAIN_CPU : 0;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+ set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_CPU : 0;
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
__FILE__, __LINE__,
@@ -525,7 +525,7 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_pwrite pwrite;
+ struct drm_i915_gem_pwrite pwrite;
int ret;
memset (&pwrite, 0, sizeof (pwrite));
@@ -533,7 +533,7 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
pwrite.offset = offset;
pwrite.size = size;
pwrite.data_ptr = (uint64_t) (uintptr_t) data;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PWRITE, &pwrite);
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__,
@@ -549,7 +549,7 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_pread pread;
+ struct drm_i915_gem_pread pread;
int ret;
memset (&pread, 0, sizeof (pread));
@@ -557,7 +557,7 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
pread.offset = offset;
pread.size = size;
pread.data_ptr = (uint64_t) (uintptr_t) data;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PREAD, &pread);
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__,
@@ -572,13 +572,13 @@ dri_gem_bo_wait_rendering(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_set_domain set_domain;
+ struct drm_i915_gem_set_domain set_domain;
int ret;
set_domain.handle = bo_gem->gem_handle;
- set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
set_domain.write_domain = 0;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
__FILE__, __LINE__,
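
Taken together, the libdrm changes above boil down to the following userspace calling sequence. This is a minimal sketch (error handling trimmed, DRM fd assumed already open and authenticated), not a copy of the bufmgr code, using only the structs and ioctls introduced by this patch:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

/* Sketch: allocate a GEM object, upload 'len' bytes from 'data', then map it
 * for CPU access.  'fd' is an already-open DRM device fd; returns NULL on error. */
static void *i915_create_upload_and_map(int fd, const void *data, uint64_t len)
{
	struct drm_i915_gem_create create;
	struct drm_i915_gem_pwrite pwrite;
	struct drm_i915_gem_mmap mmap_arg;
	struct drm_i915_gem_set_domain set_domain;

	memset(&create, 0, sizeof(create));
	create.size = len;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
		return NULL;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = create.handle;
	pwrite.offset = 0;
	pwrite.size = len;
	pwrite.data_ptr = (uint64_t)(uintptr_t)data;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite) != 0)
		return NULL;

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = create.handle;
	mmap_arg.offset = 0;
	mmap_arg.size = create.size;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) != 0)
		return NULL;

	/* Move the object to the CPU domain before touching the mapping,
	 * as dri_gem_bo_map() does above. */
	memset(&set_domain, 0, sizeof(set_domain));
	set_domain.handle = create.handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) != 0)
		return NULL;

	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}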
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 6a7f28d1..48ff4b87 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -779,21 +779,6 @@ struct drm_driver {
int (*gem_init_object) (struct drm_gem_object *obj);
void (*gem_free_object) (struct drm_gem_object *obj);
- /**
- * Driver-specific callback to set memory domains from userspace
- */
- int (*gem_set_domain) (struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain);
-
- /**
- * Driver-specific callback to flush pwrite through chipset
- */
- int (*gem_flush_pwrite) (struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size);
-
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
@@ -1390,6 +1375,11 @@ static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
kref_put (&obj->refcount, drm_gem_object_free);
}
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep);
+
static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
{
drm_gem_object_reference (obj);
@@ -1413,37 +1403,16 @@ static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
int handle);
-int drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int drm_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
-
-/*
- * Given the new read/write domains for an object,
- * compute the invalidate/flush domains for the whole device.
- *
- */
-int drm_gem_object_set_domain (struct drm_gem_object *object,
- uint32_t read_domains,
- uint32_t write_domains);
-
-
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index edc1f057..980752c2 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -151,14 +151,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CREATE, drm_gem_create_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_PREAD, drm_gem_pread_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_PWRITE, drm_gem_pwrite_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_MMAP, drm_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_SET_DOMAIN, drm_gem_set_domain_ioctl, DRM_AUTH),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
index a8ecaf76..47b55434 100644
--- a/linux-core/drm_gem.c
+++ b/linux-core/drm_gem.c
@@ -99,15 +99,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
-
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = DRM_GEM_DOMAIN_CPU;
- obj->read_domains = DRM_GEM_DOMAIN_CPU;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
fput(obj->filp);
@@ -163,7 +154,7 @@ drm_gem_handle_delete(struct drm_file *filp, int handle)
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
-static int
+int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
int *handlep)
@@ -191,6 +182,7 @@ again:
drm_gem_object_handle_reference(obj);
return 0;
}
+EXPORT_SYMBOL(drm_gem_handle_create);
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
@@ -217,40 +209,6 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
- * Creates a new mm object and returns a handle to it.
- */
-int
-drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_create *args = data;
- struct drm_gem_object *obj;
- int handle, ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- args->size = roundup(args->size, PAGE_SIZE);
-
- /* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
- if (obj == NULL)
- return -ENOMEM;
-
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- if (ret)
- return ret;
-
- args->handle = handle;
-
- return 0;
-}
-
-/**
* Releases the handle to an mm object.
*/
int
@@ -269,158 +227,6 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
}
/**
- * Reads data from the object referenced by handle.
- *
- * On error, the contents of *data are undefined.
- */
-int
-drm_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_pread *args = data;
- struct drm_gem_object *obj;
- ssize_t read;
- loff_t offset;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->gem_set_domain) {
- ret = dev->driver->gem_set_domain(obj, file_priv,
- DRM_GEM_DOMAIN_CPU,
- 0);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
- offset = args->offset;
-
- read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
- args->size, &offset);
- if (read != args->size) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (read < 0)
- return read;
- else
- return -EINVAL;
- }
-
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Maps the contents of an object, returning the address it is mapped
- * into.
- *
- * While the mapping holds a reference on the contents of the object, it doesn't
- * imply a ref on the object itself.
- */
-int
-drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_mmap *args = data;
- struct drm_gem_object *obj;
- loff_t offset;
- unsigned long addr;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- offset = args->offset;
-
- down_write(&current->mm->mmap_sem);
- addr = do_mmap(obj->filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- args->offset);
- up_write(&current->mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (IS_ERR((void *)addr))
- return addr;
-
- args->addr_ptr = (uint64_t) addr;
-
- return 0;
-}
-
-/**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
-int
-drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- ssize_t written;
- loff_t offset;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->gem_set_domain) {
- ret = dev->driver->gem_set_domain(obj, file_priv,
- DRM_GEM_DOMAIN_CPU,
- DRM_GEM_DOMAIN_CPU);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
- offset = args->offset;
-
- written = vfs_write(obj->filp,
- (char __user *)(uintptr_t) args->data_ptr,
- args->size, &offset);
-
- if (written != args->size) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (written < 0)
- return written;
- else
- return -EINVAL;
- }
-
- if (dev->driver->gem_flush_pwrite)
- dev->driver->gem_flush_pwrite(obj,
- args->offset,
- args->size);
-
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
@@ -512,39 +318,6 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
}
/**
- * Called when user space prepares to use an object
- */
-int
-drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_set_domain *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->gem_set_domain) {
- ret = dev->driver->gem_set_domain(obj, file_priv,
- args->read_domains,
- args->write_domain);
- } else {
- obj->read_domains = args->read_domains;
- obj->write_domain = args->write_domain;
- ret = 0;
- }
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 3f246a08..8f51cd4d 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -588,8 +588,6 @@ static struct drm_driver driver = {
.ioctls = i915_ioctls,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
- .gem_set_domain = i915_gem_set_domain,
- .gem_flush_pwrite = i915_gem_flush_pwrite,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index 3d47ec1a..e51de316 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -40,6 +40,11 @@ static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -65,6 +70,199 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
return 0;
}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_create *args = data;
+ struct drm_gem_object *obj;
+ int handle, ret;
+
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ /* Allocate the new object */
+ obj = drm_gem_object_alloc(dev, args->size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pread *args = data;
+ struct drm_gem_object *obj;
+ ssize_t read;
+ loff_t offset;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_CPU, 0);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ offset = args->offset;
+
+ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+ args->size, &offset);
+ if (read != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (read < 0)
+ return read;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_gem_object *obj;
+ ssize_t written;
+ loff_t offset;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ offset = args->offset;
+
+ written = vfs_write(obj->filp,
+ (char __user *)(uintptr_t) args->data_ptr,
+ args->size, &offset);
+
+ if (written != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (written < 0)
+ return written;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Called when user space prepares to use an object
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_set_domain *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ args->read_domains, args->write_domain);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_mmap *args = data;
+ struct drm_gem_object *obj;
+ loff_t offset;
+ unsigned long addr;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ offset = args->offset;
+
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap(obj->filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+ up_write(&current->mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR((void *)addr))
+ return addr;
+
+ args->addr_ptr = (uint64_t) addr;
+
+ return 0;
+}
+
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
@@ -187,7 +385,7 @@ i915_retire_commands(struct drm_device *dev)
/* The sampler always gets flushed on i965 (sigh) */
if (IS_I965G(dev))
- flush_domains |= DRM_GEM_DOMAIN_I915_SAMPLER;
+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
BEGIN_LP_RING(2);
OUT_RING(cmd);
OUT_RING(0); /* noop */
@@ -377,51 +575,51 @@ i915_gem_flush(struct drm_device *dev,
invalidate_domains, flush_domains);
#endif
- if (flush_domains & DRM_GEM_DOMAIN_CPU)
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
- if ((invalidate_domains|flush_domains) & ~DRM_GEM_DOMAIN_CPU) {
+ if ((invalidate_domains|flush_domains) & ~I915_GEM_DOMAIN_CPU) {
/*
* read/write caches:
*
- * DRM_GEM_DOMAIN_I915_RENDER is always invalidated, but is
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
* only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
* also flushed at 2d versus 3d pipeline switches.
*
* read-only caches:
*
- * DRM_GEM_DOMAIN_I915_SAMPLER is flushed on pre-965 if
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
* MI_READ_FLUSH is set, and is always flushed on 965.
*
- * DRM_GEM_DOMAIN_I915_COMMAND may not exist?
+ * I915_GEM_DOMAIN_COMMAND may not exist?
*
- * DRM_GEM_DOMAIN_I915_INSTRUCTION, which exists on 965, is
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
* invalidated when MI_EXE_FLUSH is set.
*
- * DRM_GEM_DOMAIN_I915_VERTEX, which exists on 965, is
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
* invalidated with every MI_FLUSH.
*
* TLBs:
*
- * On 965, TLBs associated with DRM_GEM_DOMAIN_I915_COMMAND
- * and DRM_GEM_DOMAIN_CPU in are invalidated at PTE write and
- * DRM_GEM_DOMAIN_I915_RENDER and DRM_GEM_DOMAIN_I915_SAMPLER
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
* are flushed at any MI_FLUSH.
*/
cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
if ((invalidate_domains|flush_domains) &
- DRM_GEM_DOMAIN_I915_RENDER)
+ I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
if (!IS_I965G(dev)) {
/*
* On the 965, the sampler cache always gets flushed
* and this bit is reserved.
*/
- if (invalidate_domains & DRM_GEM_DOMAIN_I915_SAMPLER)
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
cmd |= MI_READ_FLUSH;
}
- if (invalidate_domains & DRM_GEM_DOMAIN_I915_INSTRUCTION)
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
#if WATCH_EXEC
@@ -448,7 +646,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
/* If there are writes queued to the buffer, flush and
* create a new seqno to wait for.
*/
- if (obj->write_domain & ~(DRM_GEM_DOMAIN_CPU)) {
+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU)) {
uint32_t write_domain = obj->write_domain;
#if WATCH_BUF
DRM_INFO("%s: flushing object %p from write domain %08x\n",
@@ -503,8 +701,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* also ensure that all pending GPU writes are finished
* before we unbind.
*/
- ret = i915_gem_object_set_domain (obj, DRM_GEM_DOMAIN_CPU,
- DRM_GEM_DOMAIN_CPU);
+ ret = i915_gem_object_set_domain (obj, I915_GEM_DOMAIN_CPU,
+ I915_GEM_DOMAIN_CPU);
if (ret)
return ret;
@@ -805,8 +1003,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->read_domains & ~DRM_GEM_DOMAIN_CPU);
- BUG_ON(obj->write_domain & ~DRM_GEM_DOMAIN_CPU);
+ BUG_ON(obj->read_domains & ~I915_GEM_DOMAIN_CPU);
+ BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
return 0;
}
@@ -973,7 +1171,7 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
* stale data. That is, any new read domains.
*/
invalidate_domains |= read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & DRM_GEM_DOMAIN_CPU) {
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
__func__, flush_domains, invalidate_domains);
@@ -983,8 +1181,8 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
* then pause for rendering so that the GPU caches will be
* flushed before the cpu cache is invalidated
*/
- if ((invalidate_domains & DRM_GEM_DOMAIN_CPU) &&
- (flush_domains & ~DRM_GEM_DOMAIN_CPU)) {
+ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
+ (flush_domains & ~I915_GEM_DOMAIN_CPU)) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@@ -1225,7 +1423,7 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
/* As we're writing through the gtt, flush
* any CPU writes before we write the relocations
*/
- if (obj->write_domain & DRM_GEM_DOMAIN_CPU) {
+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->write_domain = 0;
@@ -1466,7 +1664,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
/* Set the pending read domains for the batch buffer to COMMAND */
batch_obj = object_list[args->buffer_count-1];
- batch_obj->pending_read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
+ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
batch_obj->pending_write_domain = 0;
for (i = 0; i < args->buffer_count; i++) {
@@ -1700,6 +1898,15 @@ int i915_gem_init_object(struct drm_gem_object *obj)
if (obj_priv == NULL)
return -ENOMEM;
+ /*
+ * We've just allocated pages from the kernel,
+ * so they've just been written by the CPU with
+ * zeros. They'll need to be clflushed before we
+ * use them with the GPU.
+ */
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+
obj->driver_private = obj_priv;
obj_priv->obj = obj;
INIT_LIST_HEAD(&obj_priv->list);
@@ -1735,32 +1942,6 @@ i915_gem_set_domain(struct drm_gem_object *obj,
return 0;
}
-int
-i915_gem_flush_pwrite(struct drm_gem_object *obj,
- uint64_t offset, uint64_t size)
-{
-#if 0
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- /*
- * For writes much less than the size of the object and
- * which are already pinned in memory, do the flush right now
- */
-
- if ((size < obj->size >> 1) && obj_priv->page_list != NULL) {
- unsigned long first_page = offset / PAGE_SIZE;
- unsigned long beyond_page = roundup(offset + size, PAGE_SIZE) / PAGE_SIZE;
-
- drm_ttm_cache_flush(obj_priv->page_list + first_page,
- beyond_page - first_page);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
- }
-#endif
- return 0;
-}
-
void
i915_gem_lastclose(struct drm_device *dev)
{
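
One practical consequence of routing set_domain through the driver: the CPU-domain transition handled by i915_gem_set_domain_ioctl() is also how userspace waits for outstanding rendering, as dri_gem_bo_wait_rendering() in the libdrm hunks shows. A minimal sketch of that call, with fd and handle assumed valid:

#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

/* Sketch: block until pending GPU writes to 'handle' are flushed by asking
 * for CPU read access with no write domain (serviced by i915_gem_set_domain_ioctl). */
static int i915_bo_wait_rendering(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain set_domain;

	memset(&set_domain, 0, sizeof(set_domain));
	set_domain.handle = handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	set_domain.write_domain = 0;

	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
}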
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 2373a22e..6012ef21 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -959,69 +959,12 @@ struct drm_mm_info_arg {
uint64_t p_size;
};
-
-struct drm_gem_create {
- /**
- * Requested size for the object.
- *
- * The (page-aligned) allocated size for the object will be returned.
- */
- uint64_t size;
- /**
- * Returned handle for the object.
- *
- * Object handles are nonzero.
- */
- uint32_t handle;
- uint32_t pad;
-};
-
struct drm_gem_close {
/** Handle of the object to be closed. */
uint32_t handle;
uint32_t pad;
};
-struct drm_gem_pread {
- /** Handle for the object being read. */
- uint32_t handle;
- uint32_t pad;
- /** Offset into the object to read from */
- uint64_t offset;
- /** Length of data to read */
- uint64_t size;
- /** Pointer to write the data into. */
- uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
-};
-
-struct drm_gem_pwrite {
- /** Handle for the object being written to. */
- uint32_t handle;
- uint32_t pad;
- /** Offset into the object to write to */
- uint64_t offset;
- /** Length of data to write */
- uint64_t size;
- /** Pointer to read the data from. */
- uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
-};
-
-struct drm_gem_mmap {
- /** Handle for the object being mapped. */
- uint32_t handle;
- uint32_t pad;
- /** Offset in the object to map. */
- uint64_t offset;
- /**
- * Length of data to map.
- *
- * The value will be page-aligned.
- */
- uint64_t size;
- /** Returned pointer the data was mapped at */
- uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
-};
-
struct drm_gem_flink {
/** Handle for the object being named */
uint32_t handle;
@@ -1041,19 +984,6 @@ struct drm_gem_open {
uint64_t size;
};
-struct drm_gem_set_domain {
- /** Handle for the object */
- uint32_t handle;
-
- /** New read domains */
- uint32_t read_domains;
-
- /** New write domain */
- uint32_t write_domain;
-};
-
-#define DRM_GEM_DOMAIN_CPU 0x00000001
-
/**
* \name Ioctls Definitions
*/
@@ -1075,6 +1005,10 @@ struct drm_gem_set_domain {
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
+
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
@@ -1122,15 +1056,6 @@ struct drm_gem_set_domain {
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
-#define DRM_IOCTL_GEM_CREATE DRM_IOWR(0x09, struct drm_gem_create)
-#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x0a, struct drm_gem_close)
-#define DRM_IOCTL_GEM_PREAD DRM_IOW (0x0b, struct drm_gem_pread)
-#define DRM_IOCTL_GEM_PWRITE DRM_IOW (0x0c, struct drm_gem_pwrite)
-#define DRM_IOCTL_GEM_MMAP DRM_IOWR(0x0d, struct drm_gem_mmap)
-#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0e, struct drm_gem_flink)
-#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0f, struct drm_gem_open)
-#define DRM_IOCTL_GEM_SET_DOMAIN DRM_IOW (0xb7, struct drm_gem_set_domain)
-
#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 4243b4e9..7e4de18b 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -1190,6 +1190,11 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index ce01640a..fe47708d 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -183,7 +183,12 @@ typedef struct drm_i915_sarea {
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
-#define DRM_I915_GEM_LEAVEVT 0x20
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -211,6 +216,11 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
/* Asynchronous page flipping:
*/
@@ -424,6 +434,73 @@ struct drm_i915_gem_init {
uint64_t gtt_end;
};
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /** Returned pointer the data was mapped at */
+ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+};
+
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
@@ -469,20 +546,26 @@ struct drm_i915_gem_relocation_entry {
uint32_t write_domain;
};
-/**
+/** @{
* Intel memory domains
*
* Most of these just align with the various caches in
* the system and are used to flush and invalidate as
* objects end up cached in different domains.
*/
-
-/* 0x00000001 is DRM_GEM_DOMAIN_CPU */
-#define DRM_GEM_DOMAIN_I915_RENDER 0x00000002 /* Render cache, used by 2D and 3D drawing */
-#define DRM_GEM_DOMAIN_I915_SAMPLER 0x00000004 /* Sampler cache, used by texture engine */
-#define DRM_GEM_DOMAIN_I915_COMMAND 0x00000008 /* Command queue, used to load batch buffers */
-#define DRM_GEM_DOMAIN_I915_INSTRUCTION 0x00000010 /* Instruction cache, used by shader programs */
-#define DRM_GEM_DOMAIN_I915_VERTEX 0x00000020 /* Vertex address cache */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** @} */
struct drm_i915_gem_exec_object {
/**
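
The new definitions place these requests in the device-specific command range: each DRM_IOCTL_I915_GEM_* number is DRM_COMMAND_BASE (0x40) plus the DRM_I915_GEM_* index above, e.g. 0x40 + 0x1b = 0x5b for create. A small check program, assuming the Linux _IOC_NR macro is visible through the included headers:

#include <stdio.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

int main(void)
{
	/* Prints the ioctl command numbers of the relocated GEM calls. */
	printf("GEM_CREATE     nr 0x%02x\n", _IOC_NR(DRM_IOCTL_I915_GEM_CREATE));
	printf("GEM_PREAD      nr 0x%02x\n", _IOC_NR(DRM_IOCTL_I915_GEM_PREAD));
	printf("GEM_PWRITE     nr 0x%02x\n", _IOC_NR(DRM_IOCTL_I915_GEM_PWRITE));
	printf("GEM_MMAP       nr 0x%02x\n", _IOC_NR(DRM_IOCTL_I915_GEM_MMAP));
	printf("GEM_SET_DOMAIN nr 0x%02x\n", _IOC_NR(DRM_IOCTL_I915_GEM_SET_DOMAIN));
	return 0;
}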
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 3a22ae3c..9c9925b5 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -37,7 +37,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080312"
+#define DRIVER_DATE "20080611"
#if defined(__linux__)
#define I915_HAVE_FENCE
@@ -61,7 +61,7 @@
*/
#define DRIVER_MAJOR 1
#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-#define DRIVER_MINOR 13
+#define DRIVER_MINOR 14
#else
#define DRIVER_MINOR 6
#endif
@@ -461,6 +461,16 @@ int i915_execbuffer(struct drm_device *dev, void *data,
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
@@ -479,12 +489,6 @@ int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain);
-int i915_gem_flush_pwrite(struct drm_gem_object *obj,
- uint64_t offset, uint64_t size);
void i915_gem_lastclose(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_timeout(unsigned long data);
diff --git a/tests/gem_basic.c b/tests/gem_basic.c
index 8b8b63d0..b2176fba 100644
--- a/tests/gem_basic.c
+++ b/tests/gem_basic.c
@@ -34,6 +34,7 @@
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
+#include "i915_drm.h"
static void
test_bad_close(int fd)
@@ -52,7 +53,7 @@ test_bad_close(int fd)
static void
test_create_close(int fd)
{
- struct drm_gem_create create;
+ struct drm_i915_gem_create create;
struct drm_gem_close close;
int ret;
@@ -60,7 +61,7 @@ test_create_close(int fd)
memset(&create, 0, sizeof(create));
create.size = 16 * 1024;
- ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
close.handle = create.handle;
@@ -70,14 +71,14 @@ test_create_close(int fd)
static void
test_create_fd_close(int fd)
{
- struct drm_gem_create create;
+ struct drm_i915_gem_create create;
int ret;
printf("Testing closing with an object allocated.\n");
memset(&create, 0, sizeof(create));
create.size = 16 * 1024;
- ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
close(fd);
diff --git a/tests/gem_mmap.c b/tests/gem_mmap.c
index 3f8e27a0..c3a51883 100644
--- a/tests/gem_mmap.c
+++ b/tests/gem_mmap.c
@@ -34,12 +34,13 @@
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
+#include "i915_drm.h"
#define OBJECT_SIZE 16384
int do_read(int fd, int handle, void *buf, int offset, int size)
{
- struct drm_gem_pread read;
+ struct drm_i915_gem_pread read;
/* Ensure that we don't have any convenient data in buf in case
* we fail.
@@ -52,12 +53,12 @@ int do_read(int fd, int handle, void *buf, int offset, int size)
read.size = size;
read.offset = offset;
- return ioctl(fd, DRM_IOCTL_GEM_PREAD, &read);
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
}
int do_write(int fd, int handle, void *buf, int offset, int size)
{
- struct drm_gem_pwrite write;
+ struct drm_i915_gem_pwrite write;
memset(&write, 0, sizeof(write));
write.handle = handle;
@@ -65,14 +66,14 @@ int do_write(int fd, int handle, void *buf, int offset, int size)
write.size = size;
write.offset = offset;
- return ioctl(fd, DRM_IOCTL_GEM_PWRITE, &write);
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
}
int main(int argc, char **argv)
{
int fd;
- struct drm_gem_create create;
- struct drm_gem_mmap mmap;
+ struct drm_i915_gem_create create;
+ struct drm_i915_gem_mmap mmap;
struct drm_gem_close unref;
uint8_t expected[OBJECT_SIZE];
uint8_t buf[OBJECT_SIZE];
@@ -87,12 +88,12 @@ int main(int argc, char **argv)
mmap.offset = 0;
mmap.size = 4096;
printf("Testing mmaping of bad object.\n");
- ret = ioctl(fd, DRM_IOCTL_GEM_MMAP, &mmap);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
assert(ret == -1 && errno == EINVAL);
memset(&create, 0, sizeof(create));
create.size = OBJECT_SIZE;
- ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
handle = create.handle;
@@ -100,7 +101,7 @@ int main(int argc, char **argv)
mmap.handle = handle;
mmap.offset = 0;
mmap.size = OBJECT_SIZE;
- ret = ioctl(fd, DRM_IOCTL_GEM_MMAP, &mmap);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
assert(ret == 0);
addr = (uint8_t *)(uintptr_t)mmap.addr_ptr;
diff --git a/tests/gem_readwrite.c b/tests/gem_readwrite.c
index a48f9847..54b25ea3 100644
--- a/tests/gem_readwrite.c
+++ b/tests/gem_readwrite.c
@@ -34,12 +34,13 @@
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
+#include "i915_drm.h"
#define OBJECT_SIZE 16384
int do_read(int fd, int handle, void *buf, int offset, int size)
{
- struct drm_gem_pread read;
+ struct drm_i915_gem_pread read;
/* Ensure that we don't have any convenient data in buf in case
* we fail.
@@ -52,12 +53,12 @@ int do_read(int fd, int handle, void *buf, int offset, int size)
read.size = size;
read.offset = offset;
- return ioctl(fd, DRM_IOCTL_GEM_PREAD, &read);
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
}
int do_write(int fd, int handle, void *buf, int offset, int size)
{
- struct drm_gem_pwrite write;
+ struct drm_i915_gem_pwrite write;
memset(&write, 0, sizeof(write));
write.handle = handle;
@@ -65,13 +66,13 @@ int do_write(int fd, int handle, void *buf, int offset, int size)
write.size = size;
write.offset = offset;
- return ioctl(fd, DRM_IOCTL_GEM_PWRITE, &write);
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
}
int main(int argc, char **argv)
{
int fd;
- struct drm_gem_create create;
+ struct drm_i915_gem_create create;
uint8_t expected[OBJECT_SIZE];
uint8_t buf[OBJECT_SIZE];
int ret;
@@ -81,7 +82,7 @@ int main(int argc, char **argv)
memset(&create, 0, sizeof(create));
create.size = OBJECT_SIZE;
- ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
handle = create.handle;
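
For completeness, a stand-alone pread/pwrite roundtrip against the relocated ioctls might look like the sketch below. It is illustrative only (not part of the patch) and opens /dev/dri/card0 directly rather than using the test suite's own open helpers:

/* Illustrative roundtrip: create an object, pwrite a pattern into it,
 * pread it back and compare.  Assumes the headers added by this change. */
#include <assert.h>
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

#define OBJECT_SIZE 16384

int main(void)
{
	struct drm_i915_gem_create create;
	struct drm_i915_gem_pwrite pwrite;
	struct drm_i915_gem_pread pread;
	uint8_t expected[OBJECT_SIZE], buf[OBJECT_SIZE];
	int fd, ret;

	fd = open("/dev/dri/card0", O_RDWR);
	assert(fd >= 0);

	memset(&create, 0, sizeof(create));
	create.size = OBJECT_SIZE;
	ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
	assert(ret == 0);

	memset(expected, 0xa5, sizeof(expected));
	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = create.handle;
	pwrite.offset = 0;
	pwrite.size = OBJECT_SIZE;
	pwrite.data_ptr = (uint64_t)(uintptr_t)expected;
	ret = ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
	assert(ret == 0);

	memset(&pread, 0, sizeof(pread));
	pread.handle = create.handle;
	pread.offset = 0;
	pread.size = OBJECT_SIZE;
	pread.data_ptr = (uint64_t)(uintptr_t)buf;
	ret = ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
	assert(ret == 0);

	assert(memcmp(expected, buf, OBJECT_SIZE) == 0);
	return 0;
}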