summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--configure.ac2
-rw-r--r--libdrm/intel/intel_bufmgr_fake.c14
-rw-r--r--libdrm/intel/intel_bufmgr_gem.c37
-rw-r--r--libdrm/intel/mm.c12
-rw-r--r--libdrm/intel/mm.h22
-rw-r--r--libdrm/xf86drm.c172
-rw-r--r--linux-core/Makefile7
-rw-r--r--linux-core/drmP.h51
-rw-r--r--linux-core/drm_drv.c9
-rw-r--r--linux-core/drm_gem.c386
-rw-r--r--linux-core/drm_irq.c20
-rw-r--r--linux-core/drm_lock.c59
-rw-r--r--linux-core/drm_proc.c7
-rw-r--r--linux-core/i915_drv.c31
-rw-r--r--linux-core/i915_gem.c607
-rw-r--r--shared-core/drm.h91
-rw-r--r--shared-core/i915_dma.c12
-rw-r--r--shared-core/i915_drm.h121
-rw-r--r--shared-core/i915_drv.h39
-rw-r--r--shared-core/i915_init.c39
-rw-r--r--shared-core/i915_irq.c70
-rw-r--r--tests/gem_basic.c9
-rw-r--r--tests/gem_mmap.c19
-rw-r--r--tests/gem_readwrite.c13
24 files changed, 873 insertions, 976 deletions
diff --git a/configure.ac b/configure.ac
index a8855684..1cf877d5 100644
--- a/configure.ac
+++ b/configure.ac
@@ -19,7 +19,7 @@
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ(2.57)
-AC_INIT([libdrm], 2.3.1, [dri-devel@lists.sourceforge.net], libdrm)
+AC_INIT([libdrm], 2.4.0, [dri-devel@lists.sourceforge.net], libdrm)
AC_CONFIG_SRCDIR([Makefile.am])
AM_INIT_AUTOMAKE([dist-bzip2])
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
index 3f5a22d3..e988eb58 100644
--- a/libdrm/intel/intel_bufmgr_fake.c
+++ b/libdrm/intel/intel_bufmgr_fake.c
@@ -252,7 +252,7 @@ alloc_block(dri_bo *bo)
sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
- block->mem = drmmmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
+ block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
if (!block->mem) {
free(block);
return 0;
@@ -300,7 +300,7 @@ static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
DBG(" - free immediately\n");
DRMLISTDEL(block);
- drmmmFreeMem(block->mem);
+ mmFreeMem(block->mem);
free(block);
}
}
@@ -415,7 +415,7 @@ static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
DBG("delayed free: offset %x sz %x\n",
block->mem->ofs, block->mem->size);
DRMLISTDEL(block);
- drmmmFreeMem(block->mem);
+ mmFreeMem(block->mem);
free(block);
}
else {
@@ -923,7 +923,7 @@ dri_fake_destroy(dri_bufmgr *bufmgr)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
- drmmmDestroy(bufmgr_fake->heap);
+ mmDestroy(bufmgr_fake->heap);
free(bufmgr);
}
@@ -1062,7 +1062,7 @@ dri_fake_process_relocs(dri_bo *batch_buf)
dri_fake_calculate_domains(batch_buf);
- batch_fake->read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
+ batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
/* we've ran out of RAM so blow the whole lot away and retry */
restart:
@@ -1074,7 +1074,7 @@ dri_fake_process_relocs(dri_bo *batch_buf)
bufmgr_fake->fail = 0;
goto restart;
} else /* dump out the memory here */
- drmmmDumpMemInfo(bufmgr_fake->heap);
+ mmDumpMemInfo(bufmgr_fake->heap);
}
assert(ret == 0);
@@ -1193,7 +1193,7 @@ intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
bufmgr_fake->low_offset = low_offset;
bufmgr_fake->virtual = low_virtual;
bufmgr_fake->size = size;
- bufmgr_fake->heap = drmmmInit(low_offset, size);
+ bufmgr_fake->heap = mmInit(low_offset, size);
/* Hook in methods */
bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc;
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index a65ae982..5a28bd14 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -299,7 +299,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
}
if (!alloc_from_cache) {
- struct drm_gem_create create;
+ struct drm_i915_gem_create create;
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
@@ -309,7 +309,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
memset(&create, 0, sizeof(create));
create.size = bo_size;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
bo_gem->gem_handle = create.handle;
if (ret != 0) {
free(bo_gem);
@@ -455,7 +455,7 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
{
dri_bufmgr_gem *bufmgr_gem;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_set_domain set_domain;
+ struct drm_i915_gem_set_domain set_domain;
int ret;
bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
@@ -470,13 +470,13 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
if (bo_gem->virtual == NULL) {
- struct drm_gem_mmap mmap_arg;
+ struct drm_i915_gem_mmap mmap_arg;
memset(&mmap_arg, 0, sizeof(mmap_arg));
mmap_arg.handle = bo_gem->gem_handle;
mmap_arg.offset = 0;
mmap_arg.size = bo->size;
- ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_MMAP, &mmap_arg);
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__,
@@ -491,9 +491,12 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
if (!bo_gem->cpu_domain_set) {
set_domain.handle = bo_gem->gem_handle;
- set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
- set_domain.write_domain = write_enable ? DRM_GEM_DOMAIN_CPU : 0;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+ set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_CPU : 0;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ } while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
__FILE__, __LINE__,
@@ -525,7 +528,7 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_pwrite pwrite;
+ struct drm_i915_gem_pwrite pwrite;
int ret;
memset (&pwrite, 0, sizeof (pwrite));
@@ -533,7 +536,9 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
pwrite.offset = offset;
pwrite.size = size;
pwrite.data_ptr = (uint64_t) (uintptr_t) data;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PWRITE, &pwrite);
+ do {
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+ } while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__,
@@ -549,7 +554,7 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_pread pread;
+ struct drm_i915_gem_pread pread;
int ret;
memset (&pread, 0, sizeof (pread));
@@ -557,7 +562,9 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
pread.offset = offset;
pread.size = size;
pread.data_ptr = (uint64_t) (uintptr_t) data;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PREAD, &pread);
+ do {
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+ } while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__,
@@ -572,13 +579,13 @@ dri_gem_bo_wait_rendering(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- struct drm_gem_set_domain set_domain;
+ struct drm_i915_gem_set_domain set_domain;
int ret;
set_domain.handle = bo_gem->gem_handle;
- set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
set_domain.write_domain = 0;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
__FILE__, __LINE__,
diff --git a/libdrm/intel/mm.c b/libdrm/intel/mm.c
index 2605d8ec..98146405 100644
--- a/libdrm/intel/mm.c
+++ b/libdrm/intel/mm.c
@@ -29,7 +29,7 @@
#include "mm.h"
void
-drmmmDumpMemInfo(const struct mem_block *heap)
+mmDumpMemInfo(const struct mem_block *heap)
{
drmMsg("Memory heap %p:\n", (void *)heap);
if (heap == 0) {
@@ -56,7 +56,7 @@ drmmmDumpMemInfo(const struct mem_block *heap)
}
struct mem_block *
-drmmmInit(int ofs, int size)
+mmInit(int ofs, int size)
{
struct mem_block *heap, *block;
@@ -163,7 +163,7 @@ SliceBlock(struct mem_block *p,
struct mem_block *
-drmmmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
+mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
{
struct mem_block *p;
const int mask = (1 << align2)-1;
@@ -196,7 +196,7 @@ drmmmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
struct mem_block *
-drmmmFindBlock(struct mem_block *heap, int start)
+mmFindBlock(struct mem_block *heap, int start)
{
struct mem_block *p;
@@ -235,7 +235,7 @@ Join2Blocks(struct mem_block *p)
}
int
-drmmmFreeMem(struct mem_block *b)
+mmFreeMem(struct mem_block *b)
{
if (!b)
return 0;
@@ -264,7 +264,7 @@ drmmmFreeMem(struct mem_block *b)
void
-drmmmDestroy(struct mem_block *heap)
+mmDestroy(struct mem_block *heap)
{
struct mem_block *p;
diff --git a/libdrm/intel/mm.h b/libdrm/intel/mm.h
index 965bb0cd..49e3eecc 100644
--- a/libdrm/intel/mm.h
+++ b/libdrm/intel/mm.h
@@ -40,13 +40,21 @@ struct mem_block {
unsigned int reserved:1;
};
-
+/* Rename the variables in the drm copy of this code so that it doesn't
+ * conflict with mesa or whoever else has copied it around.
+ */
+#define mmInit drm_mmInit
+#define mmAllocMem drm_mmAllocMem
+#define mmFreeMem drm_mmFreeMem
+#define mmFindBlock drm_mmFindBlock
+#define mmDestroy drm_mmDestroy
+#define mmDumpMemInfo drm_mmDumpMemInfo
/**
* input: total size in bytes
* return: a heap pointer if OK, NULL if error
*/
-extern struct mem_block *drmmmInit(int ofs, int size);
+extern struct mem_block *mmInit(int ofs, int size);
/**
* Allocate 'size' bytes with 2^align2 bytes alignment,
@@ -58,7 +66,7 @@ extern struct mem_block *drmmmInit(int ofs, int size);
* startSearch = linear offset from start of heap to begin search
* return: pointer to the allocated block, 0 if error
*/
-extern struct mem_block *drmmmAllocMem(struct mem_block *heap, int size,
+extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
int align2, int startSearch);
/**
@@ -66,23 +74,23 @@ extern struct mem_block *drmmmAllocMem(struct mem_block *heap, int size,
* input: pointer to a block
* return: 0 if OK, -1 if error
*/
-extern int drmmmFreeMem(struct mem_block *b);
+extern int mmFreeMem(struct mem_block *b);
/**
* Free block starts at offset
* input: pointer to a heap, start offset
* return: pointer to a block
*/
-extern struct mem_block *drmmmFindBlock(struct mem_block *heap, int start);
+extern struct mem_block *mmFindBlock(struct mem_block *heap, int start);
/**
* destroy MM
*/
-extern void drmmmDestroy(struct mem_block *mmInit);
+extern void mmDestroy(struct mem_block *mmInit);
/**
* For debuging purpose.
*/
-extern void drmmmDumpMemInfo(const struct mem_block *mmInit);
+extern void mmDumpMemInfo(const struct mem_block *mmInit);
#endif
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index e44706ed..4b4d009b 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -174,6 +174,19 @@ static char *drmStrdup(const char *s)
return retval;
}
+/**
+ * Call ioctl, restarting if it is interupted
+ */
+static int
+drmIoctl(int fd, int request, void *arg)
+{
+ int ret;
+
+ do {
+ ret = ioctl(fd, request, arg);
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+ return ret;
+}
static unsigned long drmGetKeyFromFd(int fd)
{
@@ -675,7 +688,7 @@ drmVersionPtr drmGetVersion(int fd)
version->desc_len = 0;
version->desc = NULL;
- if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+ if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
drmFreeKernelVersion(version);
return NULL;
}
@@ -687,7 +700,7 @@ drmVersionPtr drmGetVersion(int fd)
if (version->desc_len)
version->desc = drmMalloc(version->desc_len + 1);
- if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+ if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno));
drmFreeKernelVersion(version);
return NULL;
@@ -773,10 +786,10 @@ char *drmGetBusid(int fd)
u.unique_len = 0;
u.unique = NULL;
- if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+ if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
return NULL;
u.unique = drmMalloc(u.unique_len + 1);
- if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+ if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
return NULL;
u.unique[u.unique_len] = '\0';
@@ -803,7 +816,7 @@ int drmSetBusid(int fd, const char *busid)
u.unique = (char *)busid;
u.unique_len = strlen(busid);
- if (ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
+ if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
return -errno;
}
return 0;
@@ -814,7 +827,7 @@ int drmGetMagic(int fd, drm_magic_t * magic)
drm_auth_t auth;
*magic = 0;
- if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
+ if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
return -errno;
*magic = auth.magic;
return 0;
@@ -825,7 +838,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
drm_auth_t auth;
auth.magic = magic;
- if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
+ if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
return -errno;
return 0;
}
@@ -890,7 +903,7 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
map.handle = 0;
map.type = type;
map.flags = flags;
- if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map))
+ if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
return -errno;
if (handle)
*handle = (drm_handle_t)map.handle;
@@ -903,7 +916,7 @@ int drmRmMap(int fd, drm_handle_t handle)
map.handle = (void *)handle;
- if(ioctl(fd, DRM_IOCTL_RM_MAP, &map))
+ if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map))
return -errno;
return 0;
}
@@ -936,7 +949,7 @@ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
request.flags = flags;
request.agp_start = agp_offset;
- if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request))
+ if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request))
return -errno;
return request.count;
}
@@ -949,7 +962,7 @@ int drmMarkBufs(int fd, double low, double high)
info.count = 0;
info.list = NULL;
- if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
return -EINVAL;
if (!info.count)
@@ -958,7 +971,7 @@ int drmMarkBufs(int fd, double low, double high)
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
return -ENOMEM;
- if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
int retval = -errno;
drmFree(info.list);
return retval;
@@ -967,7 +980,7 @@ int drmMarkBufs(int fd, double low, double high)
for (i = 0; i < info.count; i++) {
info.list[i].low_mark = low * info.list[i].count;
info.list[i].high_mark = high * info.list[i].count;
- if (ioctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
+ if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
int retval = -errno;
drmFree(info.list);
return retval;
@@ -999,7 +1012,7 @@ int drmFreeBufs(int fd, int count, int *list)
request.count = count;
request.list = list;
- if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request))
+ if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request))
return -errno;
return 0;
}
@@ -1088,14 +1101,14 @@ drmBufInfoPtr drmGetBufInfo(int fd)
info.count = 0;
info.list = NULL;
- if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
return NULL;
if (info.count) {
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
return NULL;
- if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
drmFree(info.list);
return NULL;
}
@@ -1139,7 +1152,7 @@ drmBufMapPtr drmMapBufs(int fd)
bufs.count = 0;
bufs.list = NULL;
bufs.virtual = NULL;
- if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
+ if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
return NULL;
if (!bufs.count)
@@ -1148,7 +1161,7 @@ drmBufMapPtr drmMapBufs(int fd)
if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list))))
return NULL;
- if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
+ if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
drmFree(bufs.list);
return NULL;
}
@@ -1263,7 +1276,7 @@ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags)
if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
- while (ioctl(fd, DRM_IOCTL_LOCK, &lock))
+ while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock))
;
return 0;
}
@@ -1286,7 +1299,7 @@ int drmUnlock(int fd, drm_context_t context)
lock.context = context;
lock.flags = 0;
- return ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
+ return drmIoctl(fd, DRM_IOCTL_UNLOCK, &lock);
}
drm_context_t *drmGetReservedContextList(int fd, int *count)
@@ -1298,7 +1311,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
res.count = 0;
res.contexts = NULL;
- if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
+ if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
return NULL;
if (!res.count)
@@ -1312,7 +1325,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
}
res.contexts = list;
- if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
+ if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
return NULL;
for (i = 0; i < res.count; i++)
@@ -1351,7 +1364,7 @@ int drmCreateContext(int fd, drm_context_t *handle)
drm_ctx_t ctx;
ctx.flags = 0; /* Modified with functions below */
- if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
return -errno;
*handle = ctx.handle;
return 0;
@@ -1362,7 +1375,7 @@ int drmSwitchToContext(int fd, drm_context_t context)
drm_ctx_t ctx;
ctx.handle = context;
- if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
return -errno;
return 0;
}
@@ -1383,7 +1396,7 @@ int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags)
ctx.flags |= _DRM_CONTEXT_PRESERVED;
if (flags & DRM_CONTEXT_2DONLY)
ctx.flags |= _DRM_CONTEXT_2DONLY;
- if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
return -errno;
return 0;
}
@@ -1394,7 +1407,7 @@ int drmGetContextFlags(int fd, drm_context_t context,
drm_ctx_t ctx;
ctx.handle = context;
- if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx))
return -errno;
*flags = 0;
if (ctx.flags & _DRM_CONTEXT_PRESERVED)
@@ -1425,7 +1438,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
{
drm_ctx_t ctx;
ctx.handle = handle;
- if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx))
return -errno;
return 0;
}
@@ -1433,7 +1446,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
int drmCreateDrawable(int fd, drm_drawable_t *handle)
{
drm_draw_t draw;
- if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
+ if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
return -errno;
*handle = draw.handle;
return 0;
@@ -1443,7 +1456,7 @@ int drmDestroyDrawable(int fd, drm_drawable_t handle)
{
drm_draw_t draw;
draw.handle = handle;
- if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw))
+ if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw))
return -errno;
return 0;
}
@@ -1459,7 +1472,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
update.num = num;
update.data = (unsigned long long)(unsigned long)data;
- if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
+ if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
return -errno;
return 0;
@@ -1479,7 +1492,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
*/
int drmAgpAcquire(int fd)
{
- if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
return -errno;
return 0;
}
@@ -1497,7 +1510,7 @@ int drmAgpAcquire(int fd)
*/
int drmAgpRelease(int fd)
{
- if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
return -errno;
return 0;
}
@@ -1520,7 +1533,7 @@ int drmAgpEnable(int fd, unsigned long mode)
drm_agp_mode_t m;
m.mode = mode;
- if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
return -errno;
return 0;
}
@@ -1551,7 +1564,7 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
b.size = size;
b.handle = 0;
b.type = type;
- if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
return -errno;
if (address != 0UL)
*address = b.physical;
@@ -1578,7 +1591,7 @@ int drmAgpFree(int fd, drm_handle_t handle)
b.size = 0;
b.handle = handle;
- if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b))
return -errno;
return 0;
}
@@ -1603,7 +1616,7 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)
b.handle = handle;
b.offset = offset;
- if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b))
return -errno;
return 0;
}
@@ -1627,7 +1640,7 @@ int drmAgpUnbind(int fd, drm_handle_t handle)
b.handle = handle;
b.offset = 0;
- if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
return -errno;
return 0;
}
@@ -1648,7 +1661,7 @@ int drmAgpVersionMajor(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return -errno;
return i.agp_version_major;
}
@@ -1669,7 +1682,7 @@ int drmAgpVersionMinor(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return -errno;
return i.agp_version_minor;
}
@@ -1690,7 +1703,7 @@ unsigned long drmAgpGetMode(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.mode;
}
@@ -1711,7 +1724,7 @@ unsigned long drmAgpBase(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.aperture_base;
}
@@ -1732,7 +1745,7 @@ unsigned long drmAgpSize(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.aperture_size;
}
@@ -1753,7 +1766,7 @@ unsigned long drmAgpMemoryUsed(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.memory_used;
}
@@ -1774,7 +1787,7 @@ unsigned long drmAgpMemoryAvail(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.memory_allowed;
}
@@ -1795,7 +1808,7 @@ unsigned int drmAgpVendorId(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.id_vendor;
}
@@ -1816,7 +1829,7 @@ unsigned int drmAgpDeviceId(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.id_device;
}
@@ -1828,7 +1841,7 @@ int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
*handle = 0;
sg.size = size;
sg.handle = 0;
- if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
+ if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
return -errno;
*handle = sg.handle;
return 0;
@@ -1840,7 +1853,7 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
sg.size = 0;
sg.handle = handle;
- if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg))
+ if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg))
return -errno;
return 0;
}
@@ -1861,7 +1874,7 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)
int ret;
do {
- ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
+ ret = drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
vbl->request.type &= ~DRM_VBLANK_RELATIVE;
} while (ret && errno == EINTR);
@@ -1911,7 +1924,7 @@ int drmCtlInstHandler(int fd, int irq)
ctl.func = DRM_INST_HANDLER;
ctl.irq = irq;
- if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
+ if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
return -errno;
return 0;
}
@@ -1934,7 +1947,7 @@ int drmCtlUninstHandler(int fd)
ctl.func = DRM_UNINST_HANDLER;
ctl.irq = 0;
- if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
+ if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
return -errno;
return 0;
}
@@ -1951,7 +1964,7 @@ int drmFinish(int fd, int context, drmLockFlags flags)
if (flags & DRM_LOCK_FLUSH_ALL) lock.flags |= _DRM_LOCK_FLUSH_ALL;
if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
- if (ioctl(fd, DRM_IOCTL_FINISH, &lock))
+ if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock))
return -errno;
return 0;
}
@@ -1977,7 +1990,7 @@ int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
p.busnum = busnum;
p.devnum = devnum;
p.funcnum = funcnum;
- if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
+ if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
return -errno;
return p.irq;
}
@@ -2019,7 +2032,7 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
map.ctx_id = ctx_id;
map.handle = (void *)handle;
- if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
+ if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
return -errno;
return 0;
}
@@ -2031,7 +2044,7 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
map.ctx_id = ctx_id;
- if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
+ if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
return -errno;
if (handle)
*handle = (drm_handle_t)map.handle;
@@ -2046,7 +2059,7 @@ int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
drm_map_t map;
map.offset = idx;
- if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
+ if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map))
return -errno;
*offset = map.offset;
*size = map.size;
@@ -2063,7 +2076,7 @@ int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,
drm_client_t client;
client.idx = idx;
- if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client))
+ if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client))
return -errno;
*auth = client.auth;
*pid = client.pid;
@@ -2078,7 +2091,7 @@ int drmGetStats(int fd, drmStatsT *stats)
drm_stats_t s;
int i;
- if (ioctl(fd, DRM_IOCTL_GET_STATS, &s))
+ if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s))
return -errno;
stats->count = 0;
@@ -2220,7 +2233,7 @@ int drmSetInterfaceVersion(int fd, drmSetVersion *version)
sv.drm_dd_major = version->drm_dd_major;
sv.drm_dd_minor = version->drm_dd_minor;
- if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
+ if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
retcode = -errno;
}
@@ -2251,7 +2264,7 @@ int drmCommandNone(int fd, unsigned long drmCommandIndex)
request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex);
- if (ioctl(fd, request, data)) {
+ if (drmIoctl(fd, request, data)) {
return -errno;
}
return 0;
@@ -2280,7 +2293,7 @@ int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE,
DRM_COMMAND_BASE + drmCommandIndex, size);
- if (ioctl(fd, request, data)) {
+ if (drmIoctl(fd, request, data)) {
return -errno;
}
return 0;
@@ -2309,7 +2322,7 @@ int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE,
DRM_COMMAND_BASE + drmCommandIndex, size);
- if (ioctl(fd, request, data)) {
+ if (drmIoctl(fd, request, data)) {
return -errno;
}
return 0;
@@ -2338,9 +2351,8 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE,
DRM_COMMAND_BASE + drmCommandIndex, size);
- if (ioctl(fd, request, data)) {
+ if (drmIoctl(fd, request, data))
return -errno;
- }
return 0;
}
@@ -2362,7 +2374,7 @@ int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type,
arg.type = type;
arg.fence_class = fence_class;
- if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
return -errno;
fence->handle = arg.handle;
fence->fence_class = arg.fence_class;
@@ -2386,7 +2398,7 @@ int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fenc
arg.flags = flags;
arg.fence_class = fence_class;
- if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
return -errno;
fence->handle = arg.handle;
fence->fence_class = arg.fence_class;
@@ -2404,7 +2416,7 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = handle;
- if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
return -errno;
fence->handle = arg.handle;
fence->fence_class = arg.fence_class;
@@ -2421,7 +2433,7 @@ int drmFenceUnreference(int fd, const drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
return -errno;
return 0;
}
@@ -2434,7 +2446,7 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
arg.handle = fence->handle;
arg.type = flush_type;
- if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
return -errno;
fence->fence_class = arg.fence_class;
fence->type = arg.type;
@@ -2449,7 +2461,7 @@ int drmFenceUpdate(int fd, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
return -errno;
fence->fence_class = arg.fence_class;
fence->type = arg.type;
@@ -2489,7 +2501,7 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
arg.handle = fence->handle;
arg.type = emit_type;
- if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
return -errno;
fence->fence_class = arg.fence_class;
fence->type = arg.type;
@@ -2527,7 +2539,7 @@ drmIoctlTimeout(int fd, unsigned long request, void *argp)
int ret;
do {
- ret = ioctl(fd, request, argp);
+ ret = drmIoctl(fd, request, argp);
if (ret != 0 && errno == EAGAIN) {
if (!haveThen) {
gettimeofday(&then, NULL);
@@ -2637,7 +2649,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
memset(&arg, 0, sizeof(arg));
req->handle = handle;
- if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
return -errno;
drmBOCopyReply(rep, buf);
@@ -2661,7 +2673,7 @@ int drmBOUnreference(int fd, drmBO *buf)
memset(&arg, 0, sizeof(arg));
arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
return -errno;
buf->handle = 0;
@@ -2731,7 +2743,7 @@ int drmBOUnmap(int fd, drmBO *buf)
memset(&arg, 0, sizeof(arg));
arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
+ if (drmIoctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
return -errno;
}
buf->mapCount--;
@@ -2777,7 +2789,7 @@ int drmBOInfo(int fd, drmBO *buf)
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
- ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
+ ret = drmIoctl(fd, DRM_IOCTL_BO_INFO, &arg);
if (ret)
return -errno;
@@ -2832,7 +2844,7 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
arg.p_size = pSize;
arg.mem_type = memType;
- if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
return 0;
}
@@ -2844,7 +2856,7 @@ int drmMMTakedown(int fd, unsigned memType)
memset(&arg, 0, sizeof(arg));
arg.mem_type = memType;
- if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
return -errno;
return 0;
}
@@ -2886,7 +2898,7 @@ int drmMMInfo(int fd, unsigned memType, uint64_t *size)
arg.mem_type = memType;
- if (ioctl(fd, DRM_IOCTL_MM_INFO, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_MM_INFO, &arg))
return -errno;
*size = arg.p_size;
@@ -2901,7 +2913,7 @@ int drmBOVersion(int fd, unsigned int *major,
int ret;
memset(&arg, 0, sizeof(arg));
- ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
+ ret = drmIoctl(fd, DRM_IOCTL_BO_VERSION, &arg);
if (ret)
return -errno;
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 846386a5..a359f775 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -30,7 +30,6 @@
#
# make DRM_MODULES="r128 radeon"
#
-DRM_MODULES=i915
SHELL=/bin/sh
@@ -118,7 +117,7 @@ V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \
ifeq ($(V),"$(RUNNING_REL)")
HEADERFROMBOOT := 1
-GETCONFIG := MAKEFILES=$(shell pwd)/.config
+GETCONFIG := MAKEFILES=$(shell /bin/pwd)/.config
HAVECONFIG := y
endif
@@ -165,7 +164,7 @@ endif
all: modules
modules: includes
- +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+ +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules
ifeq ($(HEADERFROMBOOT),1)
@@ -241,7 +240,7 @@ drmstat: drmstat.c
$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
install:
- make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
+ make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules_install
else
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index ffe8b8ef..4a9cc761 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -819,21 +819,6 @@ struct drm_driver {
int (*gem_init_object) (struct drm_gem_object *obj);
void (*gem_free_object) (struct drm_gem_object *obj);
- /**
- * Driver-specific callback to set memory domains from userspace
- */
- int (*gem_set_domain) (struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain);
-
- /**
- * Driver-specific callback to flush pwrite through chipset
- */
- int (*gem_flush_pwrite) (struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size);
-
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
@@ -1037,6 +1022,12 @@ struct drm_device {
spinlock_t object_name_lock;
struct idr object_name_idr;
atomic_t object_count;
+ atomic_t object_memory;
+ atomic_t pin_count;
+ atomic_t pin_memory;
+ atomic_t gtt_count;
+ atomic_t gtt_memory;
+ uint32_t gtt_total;
uint32_t invalidate_domains; /* domains pending invalidation */
uint32_t flush_domains; /* domains pending flush */
/*@} */
@@ -1252,10 +1243,6 @@ extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
extern void drm_idlelock_take(struct drm_lock_data *lock_data);
extern void drm_idlelock_release(struct drm_lock_data *lock_data);
-extern int drm_client_lock_take(struct drm_device *dev,
- struct drm_file *file_priv);
-extern void drm_client_lock_release(struct drm_device *dev,
- struct drm_file *file_priv);
/*
* These are exported to drivers so that they can implement fencing using
@@ -1472,6 +1459,11 @@ static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
kref_put (&obj->refcount, drm_gem_object_free);
}
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep);
+
static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
{
drm_gem_object_reference (obj);
@@ -1495,37 +1487,16 @@ static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
int handle);
-int drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int drm_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
-
-/*
- * Given the new read/write domains for an object,
- * compute the invalidate/flush domains for the whole device.
- *
- */
-int drm_gem_object_set_domain (struct drm_gem_object *object,
- uint32_t read_domains,
- uint32_t write_domains);
-
-
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index df09e72b..64ad067a 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -175,15 +175,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CREATE, drm_gem_create_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_PREAD, drm_gem_pread_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_PWRITE, drm_gem_pwrite_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_MMAP, drm_gem_mmap_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_SET_DOMAIN, drm_gem_set_domain_ioctl, DRM_AUTH),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
index a8ecaf76..76d7aa94 100644
--- a/linux-core/drm_gem.c
+++ b/linux-core/drm_gem.c
@@ -74,6 +74,11 @@ drm_gem_init(struct drm_device *dev)
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
atomic_set(&dev->object_count, 0);
+ atomic_set(&dev->object_memory, 0);
+ atomic_set(&dev->pin_count, 0);
+ atomic_set(&dev->pin_memory, 0);
+ atomic_set(&dev->gtt_count, 0);
+ atomic_set(&dev->gtt_memory, 0);
return 0;
}
@@ -99,15 +104,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
-
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = DRM_GEM_DOMAIN_CPU;
- obj->read_domains = DRM_GEM_DOMAIN_CPU;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
fput(obj->filp);
@@ -115,55 +111,17 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
return NULL;
}
atomic_inc(&dev->object_count);
+ atomic_add(obj->size, &dev->object_memory);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
/**
- * Removes the mapping from handle to filp for this object.
- */
-static int
-drm_gem_handle_delete(struct drm_file *filp, int handle)
-{
- struct drm_device *dev;
- struct drm_gem_object *obj;
-
- /* This is gross. The idr system doesn't let us try a delete and
- * return an error code. It just spews if you fail at deleting.
- * So, we have to grab a lock around finding the object and then
- * doing the delete on it and dropping the refcount, or the user
- * could race us to double-decrement the refcount and cause a
- * use-after-free later. Given the frequency of our handle lookups,
- * we may want to use ida for number allocation and a hash table
- * for the pointers, anyway.
- */
- spin_lock(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return -EINVAL;
- }
- dev = obj->dev;
-
- /* Release reference and decrement refcount. */
- idr_remove(&filp->object_idr, handle);
- spin_unlock(&filp->table_lock);
-
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
-static int
+int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
int *handlep)
@@ -191,6 +149,7 @@ again:
drm_gem_object_handle_reference(obj);
return 0;
}
+EXPORT_SYMBOL(drm_gem_handle_create);
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
@@ -217,334 +176,6 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
- * Creates a new mm object and returns a handle to it.
- */
-int
-drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_create *args = data;
- struct drm_gem_object *obj;
- int handle, ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- args->size = roundup(args->size, PAGE_SIZE);
-
- /* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
- if (obj == NULL)
- return -ENOMEM;
-
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- if (ret)
- return ret;
-
- args->handle = handle;
-
- return 0;
-}
-
-/**
- * Releases the handle to an mm object.
- */
-int
-drm_gem_close_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_close *args = data;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- ret = drm_gem_handle_delete(file_priv, args->handle);
-
- return ret;
-}
-
-/**
- * Reads data from the object referenced by handle.
- *
- * On error, the contents of *data are undefined.
- */
-int
-drm_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_pread *args = data;
- struct drm_gem_object *obj;
- ssize_t read;
- loff_t offset;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->gem_set_domain) {
- ret = dev->driver->gem_set_domain(obj, file_priv,
- DRM_GEM_DOMAIN_CPU,
- 0);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
- offset = args->offset;
-
- read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
- args->size, &offset);
- if (read != args->size) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (read < 0)
- return read;
- else
- return -EINVAL;
- }
-
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Maps the contents of an object, returning the address it is mapped
- * into.
- *
- * While the mapping holds a reference on the contents of the object, it doesn't
- * imply a ref on the object itself.
- */
-int
-drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_mmap *args = data;
- struct drm_gem_object *obj;
- loff_t offset;
- unsigned long addr;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- offset = args->offset;
-
- down_write(&current->mm->mmap_sem);
- addr = do_mmap(obj->filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- args->offset);
- up_write(&current->mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (IS_ERR((void *)addr))
- return addr;
-
- args->addr_ptr = (uint64_t) addr;
-
- return 0;
-}
-
-/**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
-int
-drm_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- ssize_t written;
- loff_t offset;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->gem_set_domain) {
- ret = dev->driver->gem_set_domain(obj, file_priv,
- DRM_GEM_DOMAIN_CPU,
- DRM_GEM_DOMAIN_CPU);
- if (ret) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
- offset = args->offset;
-
- written = vfs_write(obj->filp,
- (char __user *)(uintptr_t) args->data_ptr,
- args->size, &offset);
-
- if (written != args->size) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (written < 0)
- return written;
- else
- return -EINVAL;
- }
-
- if (dev->driver->gem_flush_pwrite)
- dev->driver->gem_flush_pwrite(obj,
- args->offset,
- args->size);
-
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Create a global name for an object, returning the name.
- *
- * Note that the name does not hold a reference; when the object
- * is freed, the name goes away.
- */
-int
-drm_gem_flink_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_flink *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
-again:
- if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
- return -ENOMEM;
-
- spin_lock(&dev->object_name_lock);
- if (obj->name) {
- spin_unlock(&dev->object_name_lock);
- return -EEXIST;
- }
- ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
- &obj->name);
- spin_unlock(&dev->object_name_lock);
- if (ret == -EAGAIN)
- goto again;
-
- if (ret != 0) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- /*
- * Leave the reference from the lookup around as the
- * name table now holds one
- */
- args->name = (uint64_t) obj->name;
-
- return 0;
-}
-
-/**
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
-int
-drm_gem_open_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_open *args = data;
- struct drm_gem_object *obj;
- int ret;
- int handle;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- spin_lock(&dev->object_name_lock);
- obj = idr_find(&dev->object_name_idr, (int) args->name);
- if (obj)
- drm_gem_object_reference(obj);
- spin_unlock(&dev->object_name_lock);
- if (!obj)
- return -ENOENT;
-
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (ret)
- return ret;
-
- args->handle = handle;
- args->size = obj->size;
-
- return 0;
-}
-
-/**
- * Called when user space prepares to use an object
- */
-int
-drm_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_set_domain *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->gem_set_domain) {
- ret = dev->driver->gem_set_domain(obj, file_priv,
- args->read_domains,
- args->write_domain);
- } else {
- obj->read_domains = args->read_domains;
- obj->write_domain = args->write_domain;
- ret = 0;
- }
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
@@ -603,6 +234,7 @@ drm_gem_object_free(struct kref *kref)
fput(obj->filp);
atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index c6024d95..0dfbe57a 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
- p->irq = dev->irq;
+ p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
@@ -285,7 +285,7 @@ int drm_irq_install(struct drm_device * dev)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if (dev->irq == 0)
+ if (dev->pdev->irq == 0)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@@ -303,7 +303,7 @@ int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("irq=%d\n", dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@@ -312,7 +312,7 @@ int drm_irq_install(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
- ret = request_irq(dev->irq, dev->driver->irq_handler,
+ ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
sh_flags, dev->devname, dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
@@ -320,6 +320,10 @@ int drm_irq_install(struct drm_device * dev)
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ /* Expose the device irq to device drivers that want to export it for
+ * whatever reason.
+ */
+ dev->irq = dev->pdev->irq;
/* After installing handler */
ret = dev->driver->irq_postinstall(dev);
@@ -355,11 +359,11 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("irq=%d\n", dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
dev->driver->irq_uninstall(dev);
- free_irq(dev->irq, dev);
+ free_irq(dev->pdev->irq, dev);
drm_vblank_cleanup(dev);
@@ -397,7 +401,7 @@ int drm_control(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != dev->irq)
+ ctl->irq != dev->pdev->irq)
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
@@ -580,7 +584,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret = 0;
unsigned int flags, seq, crtc;
- if ((!dev->irq) || (!dev->irq_enabled))
+ if ((!dev->pdev->irq) || (!dev->irq_enabled))
return -EINVAL;
if (vblwait->request.type &
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index d2fb1feb..e8bdad92 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -384,65 +384,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_release);
-/**
- * Takes the lock on behalf of the client if needed, using the kernel context.
- *
- * This allows us to hide the hardware lock when it's required for protection
- * of data structures (such as command ringbuffer) shared with the X Server, an
-
- * a way for us to transition to lockless for those requests when the X Server
- * stops accessing the ringbuffer directly, without having to update the
- * other userland clients.
- */
-int drm_client_lock_take(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
- int ret;
- unsigned long irqflags;
-
- /* If the client has the lock, we're already done. */
- if (drm_i_have_hw_lock(dev, file_priv))
- return 0;
-
- mutex_unlock (&dev->struct_mutex);
- /* Client doesn't hold the lock. Block taking the lock with the kernel
- * context on behalf of the client, and return whether we were
- * successful.
- */
- spin_lock_irqsave(&master->lock.spinlock, irqflags);
- master->lock.user_waiters++;
- spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
- ret = wait_event_interruptible(master->lock.lock_queue,
- drm_lock_take(&master->lock,
- DRM_KERNEL_CONTEXT));
- spin_lock_irqsave(&master->lock.spinlock, irqflags);
- master->lock.user_waiters--;
- if (ret != 0) {
- spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
- } else {
- master->lock.file_priv = file_priv;
- master->lock.lock_time = jiffies;
- master->lock.kernel_held = 1;
- file_priv->lock_count++;
- spin_unlock_irqrestore(&master->lock.spinlock, irqflags);
- }
- mutex_lock (&dev->struct_mutex);
- return ret;
-}
-EXPORT_SYMBOL(drm_client_lock_take);
-
-void drm_client_lock_release(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_master *master = file_priv->master;
-
- if (master->lock.kernel_held) {
- master->lock.kernel_held = 0;
- master->lock.file_priv = NULL;
- drm_lock_free(&master->lock, DRM_KERNEL_CONTEXT);
- }
-}
-EXPORT_SYMBOL(drm_client_lock_release);
-
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 690e081c..127a7987 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -658,7 +658,12 @@ static int drm_gem_object_info(char *buf, char **start, off_t offset,
*start = &buf[offset];
*eof = 0;
- DRM_PROC_PRINT ("%d objects\n", atomic_read (&dev->object_count));
+ DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
+ DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
+ DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
+ DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
+ DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+ DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
if (len > request + offset)
return request;
*eof = 1;
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 574282b2..1817c964 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -569,6 +569,8 @@ static int i915_resume(struct drm_device *dev)
}
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void remove(struct pci_dev *pdev);
+
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
* deal with them for intel hardware.
@@ -579,8 +581,10 @@ static struct drm_driver driver = {
.load = i915_driver_load,
.unload = i915_driver_unload,
.firstopen = i915_driver_firstopen,
+ .open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
+ .postclose = i915_driver_postclose,
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
@@ -599,8 +603,6 @@ static struct drm_driver driver = {
.ioctls = i915_ioctls,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
- .gem_set_domain = i915_gem_set_domain,
- .gem_flush_pwrite = i915_gem_flush_pwrite,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -617,7 +619,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
- .remove = __devexit_p(drm_cleanup_pci),
+ .remove = remove,
},
#if defined(I915_HAVE_FENCE) && defined(I915_TTM)
.fence_driver = &i915_fence_driver,
@@ -635,7 +637,28 @@ static struct drm_driver driver = {
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ int ret;
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+ * integrated graphics even though the support isn't actually there
+ * according to the published specs. It doesn't appear to function
+ * correctly in testing on 945G.
+ * This may be a side effect of MSI having been made available for PEG
+ * and the registers being closely associated.
+ */
+ if (pdev->device != 0x2772 && pdev->device != 0x27A2)
+ (void)pci_enable_msi(pdev);
+
+ ret = drm_get_dev(pdev, ent, &driver);
+ if (ret && pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ return ret;
+}
+static void remove(struct pci_dev *pdev)
+{
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ drm_cleanup_pci(pdev);
}
static int __init i915_init(void)
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index 47745010..787251f5 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -35,11 +35,17 @@
#define WATCH_EXEC 0
#define WATCH_LRU 0
#define WATCH_RELOC 0
+#define WATCH_INACTIVE 0
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
@@ -55,6 +61,8 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
drm_memrange_init(&dev_priv->mm.gtt_space, start,
end - start);
+ dev->gtt_total = (uint32_t) (end - start);
+
return 0;
}
@@ -72,6 +80,199 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
return ret;
}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_create *args = data;
+ struct drm_gem_object *obj;
+ int handle, ret;
+
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ /* Allocate the new object */
+ obj = drm_gem_object_alloc(dev, args->size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pread *args = data;
+ struct drm_gem_object *obj;
+ ssize_t read;
+ loff_t offset;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_CPU, 0);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ offset = args->offset;
+
+ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+ args->size, &offset);
+ if (read != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (read < 0)
+ return read;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_gem_object *obj;
+ ssize_t written;
+ loff_t offset;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ offset = args->offset;
+
+ written = vfs_write(obj->filp,
+ (char __user *)(uintptr_t) args->data_ptr,
+ args->size, &offset);
+
+ if (written != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (written < 0)
+ return written;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Called when user space prepares to use an object
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_set_domain *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ args->read_domains, args->write_domain);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_mmap *args = data;
+ struct drm_gem_object *obj;
+ loff_t offset;
+ unsigned long addr;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+ offset = args->offset;
+
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap(obj->filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+ up_write(&current->mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR((void *)addr))
+ return addr;
+
+ args->addr_ptr = (uint64_t) addr;
+
+ return 0;
+}
+
static void
i915_gem_object_free_page_list(struct drm_gem_object *obj)
{
@@ -110,6 +311,26 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
&dev_priv->mm.active_list);
}
+#if WATCH_INACTIVE
+static void
+i915_verify_inactive(struct drm_device *dev, char *file, int line)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ obj = obj_priv->obj;
+ if (obj_priv->pin_count || obj_priv->active || (obj->write_domain & ~I915_GEM_DOMAIN_CPU))
+ DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
+ obj,
+ obj_priv->pin_count, obj_priv->active, obj->write_domain, file, line);
+ }
+}
+#else
+#define i915_verify_inactive(dev,file,line)
+#endif
+
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
{
@@ -117,6 +338,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->pin_count != 0)
list_del_init(&obj_priv->list);
else
@@ -126,6 +348,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
obj_priv->active = 0;
drm_gem_object_unreference(obj);
}
+ i915_verify_inactive(dev, __FILE__, __LINE__);
}
/**
@@ -142,6 +365,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
uint32_t seqno;
+ int was_empty;
RING_LOCALS;
request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
@@ -169,11 +393,11 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
request->seqno = seqno;
request->emitted_jiffies = jiffies;
request->flush_domains = flush_domains;
- if (list_empty(&dev_priv->mm.request_list))
- mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
-
+ was_empty = list_empty(&dev_priv->mm.request_list);
list_add_tail(&request->list, &dev_priv->mm.request_list);
+ if (was_empty)
+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
return seqno;
}
@@ -194,7 +418,7 @@ i915_retire_commands(struct drm_device *dev)
/* The sampler always gets flushed on i965 (sigh) */
if (IS_I965G(dev))
- flush_domains |= DRM_GEM_DOMAIN_I915_SAMPLER;
+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
BEGIN_LP_RING(2);
OUT_RING(cmd);
OUT_RING(0); /* noop */
@@ -306,33 +530,24 @@ i915_gem_retire_requests(struct drm_device *dev)
list_del(&request->list);
drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
} else
- break;
+ break;
}
}
void
-i915_gem_retire_timeout(unsigned long data)
-{
- struct drm_device *dev = (struct drm_device *) data;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- schedule_work(&dev_priv->mm.retire_task);
-}
-
-void
-i915_gem_retire_handler(struct work_struct *work)
+i915_gem_retire_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
struct drm_device *dev;
dev_priv = container_of(work, struct drm_i915_private,
- mm.retire_task);
+ mm.retire_work.work);
dev = dev_priv->dev;
mutex_lock(&dev->struct_mutex);
i915_gem_retire_requests(dev);
if (!list_empty(&dev_priv->mm.request_list))
- mod_timer(&dev_priv->mm.retire_timer, jiffies + HZ);
+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
@@ -356,8 +571,8 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
i915_user_irq_off(dev);
}
if (ret)
- DRM_ERROR ("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, i915_get_gem_seqno(dev));
+ DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+ __func__, ret, seqno, i915_get_gem_seqno(dev));
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
@@ -384,51 +599,51 @@ i915_gem_flush(struct drm_device *dev,
invalidate_domains, flush_domains);
#endif
- if (flush_domains & DRM_GEM_DOMAIN_CPU)
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
- if ((invalidate_domains|flush_domains) & ~DRM_GEM_DOMAIN_CPU) {
+ if ((invalidate_domains|flush_domains) & ~I915_GEM_DOMAIN_CPU) {
/*
* read/write caches:
*
- * DRM_GEM_DOMAIN_I915_RENDER is always invalidated, but is
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
* only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
* also flushed at 2d versus 3d pipeline switches.
*
* read-only caches:
*
- * DRM_GEM_DOMAIN_I915_SAMPLER is flushed on pre-965 if
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
* MI_READ_FLUSH is set, and is always flushed on 965.
*
- * DRM_GEM_DOMAIN_I915_COMMAND may not exist?
+ * I915_GEM_DOMAIN_COMMAND may not exist?
*
- * DRM_GEM_DOMAIN_I915_INSTRUCTION, which exists on 965, is
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
* invalidated when MI_EXE_FLUSH is set.
*
- * DRM_GEM_DOMAIN_I915_VERTEX, which exists on 965, is
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
* invalidated with every MI_FLUSH.
*
* TLBs:
*
- * On 965, TLBs associated with DRM_GEM_DOMAIN_I915_COMMAND
- * and DRM_GEM_DOMAIN_CPU in are invalidated at PTE write and
- * DRM_GEM_DOMAIN_I915_RENDER and DRM_GEM_DOMAIN_I915_SAMPLER
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
* are flushed at any MI_FLUSH.
*/
cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
if ((invalidate_domains|flush_domains) &
- DRM_GEM_DOMAIN_I915_RENDER)
+ I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
if (!IS_I965G(dev)) {
/*
* On the 965, the sampler cache always gets flushed
* and this bit is reserved.
*/
- if (invalidate_domains & DRM_GEM_DOMAIN_I915_SAMPLER)
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
cmd |= MI_READ_FLUSH;
}
- if (invalidate_domains & DRM_GEM_DOMAIN_I915_INSTRUCTION)
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
#if WATCH_EXEC
@@ -455,7 +670,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
/* If there are writes queued to the buffer, flush and
* create a new seqno to wait for.
*/
- if (obj->write_domain & ~(DRM_GEM_DOMAIN_CPU)) {
+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU)) {
uint32_t write_domain = obj->write_domain;
#if WATCH_BUF
DRM_INFO("%s: flushing object %p from write domain %08x\n",
@@ -494,6 +709,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
static int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret = 0;
@@ -504,16 +720,31 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
if (obj_priv->gtt_space == NULL)
return 0;
+ if (obj_priv->pin_count != 0) {
+ DRM_ERROR("Attempting to unbind pinned buffer\n");
+ return -EINVAL;
+ }
+
+ /* Wait for any rendering to complete
+ */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret) {
+ DRM_ERROR ("wait_rendering failed: %d\n", ret);
+ return ret;
+ }
+
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
* also ensure that all pending GPU writes are finished
* before we unbind.
*/
- ret = i915_gem_object_set_domain (obj, DRM_GEM_DOMAIN_CPU,
- DRM_GEM_DOMAIN_CPU);
- if (ret)
+ ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
+ I915_GEM_DOMAIN_CPU);
+ if (ret) {
+ DRM_ERROR("set_domain failed: %d\n", ret);
return ret;
+ }
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
@@ -521,21 +752,20 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
obj_priv->agp_mem = NULL;
}
+ BUG_ON(obj_priv->active);
+
i915_gem_object_free_page_list(obj);
+ atomic_dec(&dev->gtt_count);
+ atomic_sub(obj->size, &dev->gtt_memory);
+
drm_memrange_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
/* Remove ourselves from the LRU list if present. */
- if (!list_empty(&obj_priv->list)) {
+ if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
- if (obj_priv->active) {
- DRM_ERROR("Failed to wait on buffer when unbinding, "
- "continued anyway.\n");
- obj_priv->active = 0;
- drm_gem_object_unreference(obj);
- }
- }
+
return 0;
}
@@ -622,7 +852,7 @@ i915_gem_evict_something(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret;
+ int ret = 0;
for (;;) {
/* If there's an inactive buffer available now, grab it
@@ -634,6 +864,13 @@ i915_gem_evict_something(struct drm_device *dev)
list);
obj = obj_priv->obj;
BUG_ON(obj_priv->pin_count != 0);
+#if WATCH_LRU
+ DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+ BUG_ON(obj_priv->active);
+
+ /* Wait on the rendering and unbind the buffer. */
+ ret = i915_gem_object_unbind(obj);
break;
}
@@ -643,17 +880,21 @@ i915_gem_evict_something(struct drm_device *dev)
*/
if (!list_empty(&dev_priv->mm.request_list)) {
struct drm_i915_gem_request *request;
- int ret;
request = list_first_entry(&dev_priv->mm.request_list,
struct drm_i915_gem_request,
list);
ret = i915_wait_request(dev, request->seqno);
- if (ret != 0)
- return ret;
- continue;
+ /* if waiting caused an object to become inactive,
+ * then loop around and wait for it. Otherwise, we
+ * assume that waiting freed and unbound something,
+ * so there should now be some space in the GTT
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list))
+ continue;
+ break;
}
/* If we didn't have anything on the request list but there
@@ -676,21 +917,15 @@ i915_gem_evict_something(struct drm_device *dev)
continue;
}
+ DRM_ERROR("inactive empty %d request empty %d flushing empty %d\n",
+ list_empty(&dev_priv->mm.inactive_list),
+ list_empty(&dev_priv->mm.request_list),
+ list_empty(&dev_priv->mm.flushing_list));
/* If we didn't do any of the above, there's nothing to be done
* and we just can't fit it in.
*/
return -ENOMEM;
}
-
-#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-
- BUG_ON(obj_priv->active);
-
- /* Wait on the rendering and unbind the buffer. */
- ret = i915_gem_object_unbind(obj);
-
return ret;
}
@@ -776,7 +1011,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
ret = i915_gem_evict_something(dev);
if (ret != 0) {
- DRM_ERROR("Failed to evict a buffer\n");
+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
return ret;
}
goto search_free;
@@ -807,13 +1042,15 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
obj_priv->gtt_space = NULL;
return -ENOMEM;
}
+ atomic_inc(&dev->gtt_count);
+ atomic_add(obj->size, &dev->gtt_memory);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->read_domains & ~DRM_GEM_DOMAIN_CPU);
- BUG_ON(obj->write_domain & ~DRM_GEM_DOMAIN_CPU);
+ BUG_ON(obj->read_domains & ~I915_GEM_DOMAIN_CPU);
+ BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
return 0;
}
@@ -980,7 +1217,7 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
* stale data. That is, any new read domains.
*/
invalidate_domains |= read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & DRM_GEM_DOMAIN_CPU) {
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
#if WATCH_BUF
DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
__func__, flush_domains, invalidate_domains);
@@ -990,8 +1227,8 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
* then pause for rendering so that the GPU caches will be
* flushed before the cpu cache is invalidated
*/
- if ((invalidate_domains & DRM_GEM_DOMAIN_CPU) &&
- (flush_domains & ~DRM_GEM_DOMAIN_CPU)) {
+ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
+ (flush_domains & ~I915_GEM_DOMAIN_CPU)) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@@ -1051,7 +1288,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
- __FUNCTION__, obj, obj_priv->gtt_offset, handle,
+ __func__, obj, obj_priv->gtt_offset, handle,
obj->size / 1024);
gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
@@ -1110,29 +1347,25 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
#endif
/**
- * Bind an object to the GTT and evaluate the relocations landing in it
- *
- *
+ * Pin an object to the GTT and evaluate the relocations landing in it.
*/
static int
-i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry)
+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object *entry)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_relocation_entry __user *relocs;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int i;
+ int i, ret;
uint32_t last_reloc_offset = -1;
void *reloc_page = NULL;
/* Choose the GTT offset for our buffer and put it there. */
- if (obj_priv->gtt_space == NULL) {
- i915_gem_object_bind_to_gtt(obj, (unsigned) entry->alignment);
- if (obj_priv->gtt_space == NULL)
- return -ENOMEM;
- }
+ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+ if (ret)
+ return ret;
entry->offset = obj_priv->gtt_offset;
@@ -1148,13 +1381,17 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
int ret;
ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
- if (ret != 0)
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
return ret;
+ }
target_obj = drm_gem_object_lookup(obj->dev, file_priv,
reloc.target_handle);
- if (target_obj == NULL)
+ if (target_obj == NULL) {
+ i915_gem_object_unpin(obj);
return -EINVAL;
+ }
target_obj_priv = target_obj->driver_private;
/* The target buffer should have appeared before us in the
@@ -1164,6 +1401,7 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
DRM_ERROR("No GTT space found for object %d\n",
reloc.target_handle);
drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
return -EINVAL;
}
@@ -1173,6 +1411,7 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
obj, reloc.target_handle,
(int) reloc.offset, (int) obj->size);
drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
return -EINVAL;
}
if (reloc.offset & 3) {
@@ -1181,6 +1420,7 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
obj, reloc.target_handle,
(int) reloc.offset);
drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
return -EINVAL;
}
@@ -1194,6 +1434,7 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
reloc.write_domain,
target_obj->pending_write_domain);
drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
return -EINVAL;
}
@@ -1232,7 +1473,7 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
/* As we're writing through the gtt, flush
* any CPU writes before we write the relocations
*/
- if (obj->write_domain & DRM_GEM_DOMAIN_CPU) {
+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
drm_agp_chipset_flush(dev);
obj->write_domain = 0;
@@ -1254,6 +1495,7 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
last_reloc_offset = reloc_offset;
if (reloc_page == NULL) {
drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
return -ENOMEM;
}
}
@@ -1276,6 +1518,7 @@ i915_gem_object_bind_and_relocate(struct drm_gem_object *obj,
ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
if (ret != 0) {
drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
return ret;
}
@@ -1362,32 +1605,17 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
* relatively low latency when blocking on a particular request to finish.
*/
static int
-i915_gem_ring_throttle(struct drm_device *dev)
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
int ret = 0;
+ uint32_t seqno;
mutex_lock(&dev->struct_mutex);
- while (!list_empty(&dev_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&dev_priv->mm.request_list,
- struct drm_i915_gem_request,
- list);
-
- /* Break out if we're close enough. */
- if ((long) (jiffies - request->emitted_jiffies) <= (20 * HZ) / 1000) {
- mutex_unlock(&dev->struct_mutex);
- return 0;
- }
-
- /* Wait on the last request if not. */
- ret = i915_wait_request(dev, request->seqno);
- if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
+ seqno = i915_file_priv->mm.last_gem_throttle_seqno;
+ i915_file_priv->mm.last_gem_throttle_seqno = i915_file_priv->mm.last_gem_seqno;
+ if (seqno)
+ ret = i915_wait_request(dev, seqno);
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1397,21 +1625,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
- int ret, i;
+ int ret, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains;
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
#if WATCH_EXEC
DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
(int) args->buffers_ptr, args->buffer_count, args->batch_len);
#endif
- i915_kernel_lost_context(dev);
/* Copy in the exec list from userland */
exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
@@ -1437,6 +1663,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
+ i915_verify_inactive(dev, __FILE__, __LINE__);
if (dev_priv->mm.suspended) {
DRM_ERROR("Execbuf while VT-switched.\n");
mutex_unlock(&dev->struct_mutex);
@@ -1463,20 +1690,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
object_list[i]->pending_read_domains = 0;
object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_bind_and_relocate(object_list[i],
- file_priv,
- &exec_list[i]);
+ ret = i915_gem_object_pin_and_relocate(object_list[i],
+ file_priv,
+ &exec_list[i]);
if (ret) {
DRM_ERROR("object bind and relocate failed %d\n", ret);
goto err;
}
+ pinned = i + 1;
}
/* Set the pending read domains for the batch buffer to COMMAND */
batch_obj = object_list[args->buffer_count-1];
- batch_obj->pending_read_domains = DRM_GEM_DOMAIN_I915_COMMAND;
+ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
batch_obj->pending_write_domain = 0;
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1499,9 +1729,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
goto err;
}
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
/* Flush/invalidate caches and chipset buffer */
flush_domains = i915_gem_dev_set_domain(dev);
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
#if WATCH_COHERENCY
for (i = 0; i < args->buffer_count; i++) {
i915_gem_object_check_coherency(object_list[i],
@@ -1531,6 +1765,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
*/
flush_domains |= i915_retire_commands(dev);
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
/*
* Get a seqno representing the execution of the current buffer,
* which we can wait on. We would like to mitigate these interrupts,
@@ -1540,6 +1776,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
*/
seqno = i915_add_request(dev, flush_domains);
BUG_ON(seqno == 0);
+ i915_file_priv->mm.last_gem_seqno = seqno;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1554,6 +1791,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_dump_lru(dev, __func__);
#endif
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((struct drm_i915_relocation_entry __user *)
(uintptr_t) args->buffers_ptr,
@@ -1565,6 +1804,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
args->buffer_count, ret);
err:
if (object_list != NULL) {
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+
for (i = 0; i < args->buffer_count; i++)
drm_gem_object_unreference(object_list[i]);
}
@@ -1586,28 +1828,55 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret;
+ i915_verify_inactive(dev, __FILE__, __LINE__);
if (obj_priv->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment);
if (ret != 0) {
- DRM_ERROR("Failure to bind in "
- "i915_gem_pin_ioctl(): %d\n",
- ret);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("Failure to bind: %d", ret);
return ret;
}
}
-
obj_priv->pin_count++;
+
+ /* If the object is not active and not pending a flush,
+ * remove it from the inactive list
+ */
+ if (obj_priv->pin_count == 1) {
+ atomic_inc(&dev->pin_count);
+ atomic_add(obj->size, &dev->pin_memory);
+ if (!obj_priv->active && (obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0 &&
+ !list_empty(&obj_priv->list))
+ list_del_init(&obj_priv->list);
+ }
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
return 0;
}
void
i915_gem_object_unpin(struct drm_gem_object *obj)
{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ i915_verify_inactive(dev, __FILE__, __LINE__);
obj_priv->pin_count--;
+ BUG_ON(obj_priv->pin_count < 0);
+ BUG_ON(obj_priv->gtt_space == NULL);
+
+ /* If the object is no longer pinned, and is
+ * neither active nor being flushed, then stick it on
+ * the inactive list
+ */
+ if (obj_priv->pin_count == 0) {
+ if (!obj_priv->active && (obj->write_domain & ~I915_GEM_DOMAIN_CPU) == 0)
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.inactive_list);
+ atomic_dec(&dev->pin_count);
+ atomic_sub(obj->size, &dev->pin_memory);
+ }
+ i915_verify_inactive(dev, __FILE__, __LINE__);
}
int
@@ -1621,7 +1890,6 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
- i915_kernel_lost_context(dev);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
@@ -1654,7 +1922,6 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
- i915_kernel_lost_context(dev);
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
@@ -1689,7 +1956,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
args->busy = obj_priv->active;
-
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1699,7 +1966,7 @@ int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- return i915_gem_ring_throttle(dev);
+ return i915_gem_ring_throttle(dev, file_priv);
}
int i915_gem_init_object(struct drm_gem_object *obj)
@@ -1710,6 +1977,15 @@ int i915_gem_init_object(struct drm_gem_object *obj)
if (obj_priv == NULL)
return -ENOMEM;
+ /*
+ * We've just allocated pages from the kernel,
+ * so they've just been written by the CPU with
+ * zeros. They'll need to be clflushed before we
+ * use them with the GPU.
+ */
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+
obj->driver_private = obj_priv;
obj_priv->obj = obj;
INIT_LIST_HEAD(&obj_priv->list);
@@ -1718,7 +1994,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
void i915_gem_free_object(struct drm_gem_object *obj)
{
- i915_kernel_lost_context(obj->dev);
i915_gem_object_unbind(obj);
drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -1735,41 +2010,11 @@ i915_gem_set_domain(struct drm_gem_object *obj,
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- drm_client_lock_take(dev, file_priv);
- i915_kernel_lost_context(dev);
ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
- if (ret) {
- drm_client_lock_release(dev, file_priv);
+ if (ret)
return ret;
- }
i915_gem_dev_set_domain(obj->dev);
- drm_client_lock_release(dev, file_priv);
- return 0;
-}
-
-int
-i915_gem_flush_pwrite(struct drm_gem_object *obj,
- uint64_t offset, uint64_t size)
-{
-#if 0
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- /*
- * For writes much less than the size of the object and
- * which are already pinned in memory, do the flush right now
- */
-
- if ((size < obj->size >> 1) && obj_priv->page_list != NULL) {
- unsigned long first_page = offset / PAGE_SIZE;
- unsigned long beyond_page = roundup(offset + size, PAGE_SIZE) / PAGE_SIZE;
- drm_ttm_cache_flush(obj_priv->page_list + first_page,
- beyond_page - first_page);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
- }
-#endif
return 0;
}
@@ -1817,8 +2062,10 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
obj_priv = obj->driver_private;
ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0)
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
return ret;
+ }
/* Set up the kernel mapping for the ring. */
dev_priv->ring.Size = obj->size;
@@ -1886,6 +2133,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
mutex_lock(&dev->struct_mutex);
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
dev_priv->mm.suspended = 0;
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -1929,6 +2180,8 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t seqno;
+ int ret;
mutex_lock(&dev->struct_mutex);
/* Hack! Don't let anybody do execbuf while we don't control the chip.
@@ -1936,32 +2189,40 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
*/
dev_priv->mm.suspended = 1;
- /* Move all buffers out of the GTT. */
- i915_gem_evict_from_list(dev, &dev_priv->mm.active_list);
- i915_gem_evict_from_list(dev, &dev_priv->mm.flushing_list);
- i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+ i915_kernel_lost_context(dev);
- /* Make sure the harware's idle. */
- while (!list_empty(&dev_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
- int ret;
+ /* Flush the GPU along with all non-CPU write domains
+ */
+ i915_gem_flush(dev, ~I915_GEM_DOMAIN_CPU, ~I915_GEM_DOMAIN_CPU);
+ seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
+ if (seqno == 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+ ret = i915_wait_request(dev, seqno);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
- request = list_first_entry(&dev_priv->mm.request_list,
- struct drm_i915_gem_request,
- list);
+ /* Active and flushing should now be empty as we've
+ * waited for a sequence higher than any pending execbuffer
+ */
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- ret = i915_wait_request(dev, request->seqno);
- if (ret != 0) {
- DRM_ERROR("Error waiting for idle at LeaveVT: %d\n",
- ret);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
+ /* Request should now be empty as we've also waited
+ * for the last request in the list
+ */
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+ /* Move all buffers out of the GTT. */
+ i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
i915_gem_cleanup_ringbuffer(dev);
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 59cbbdd9..2d0f1f4d 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -993,84 +993,6 @@ struct drm_mm_info_arg {
uint64_t p_size;
};
-struct drm_gem_create {
- /**
- * Requested size for the object.
- *
- * The (page-aligned) allocated size for the object will be returned.
- */
- uint64_t size;
- /**
- * Returned handle for the object.
- *
- * Object handles are nonzero.
- */
- uint32_t handle;
- uint32_t pad;
-};
-
-struct drm_gem_close {
- /** Handle of the object to be closed. */
- uint32_t handle;
- uint32_t pad;
-};
-
-struct drm_gem_pread {
- /** Handle for the object being read. */
- uint32_t handle;
- uint32_t pad;
- /** Offset into the object to read from */
- uint64_t offset;
- /** Length of data to read */
- uint64_t size;
- /** Pointer to write the data into. */
- uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
-};
-
-struct drm_gem_pwrite {
- /** Handle for the object being written to. */
- uint32_t handle;
- uint32_t pad;
- /** Offset into the object to write to */
- uint64_t offset;
- /** Length of data to write */
- uint64_t size;
- /** Pointer to read the data from. */
- uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
-};
-
-struct drm_gem_mmap {
- /** Handle for the object being mapped. */
- uint32_t handle;
- uint32_t pad;
- /** Offset in the object to map. */
- uint64_t offset;
- /**
- * Length of data to map.
- *
- * The value will be page-aligned.
- */
- uint64_t size;
- /** Returned pointer the data was mapped at */
- uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
-};
-
-struct drm_gem_flink {
- /** Handle for the object being named */
- uint32_t handle;
- /** Returned global name */
- uint32_t name;
-};
-
-struct drm_gem_open {
- /** Name of object being opened */
- uint32_t name;
- /** Returned handle for the object */
- uint32_t handle;
- /** Returned size of the object */
- uint64_t size;
-};
-
struct drm_gem_set_domain {
/** Handle for the object */
uint32_t handle;
@@ -1298,6 +1220,10 @@ struct drm_mode_crtc_lut {
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
+
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
@@ -1348,15 +1274,6 @@ struct drm_mode_crtc_lut {
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
-#define DRM_IOCTL_GEM_CREATE DRM_IOWR(0x09, struct drm_gem_create)
-#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x0a, struct drm_gem_close)
-#define DRM_IOCTL_GEM_PREAD DRM_IOW (0x0b, struct drm_gem_pread)
-#define DRM_IOCTL_GEM_PWRITE DRM_IOW (0x0c, struct drm_gem_pwrite)
-#define DRM_IOCTL_GEM_MMAP DRM_IOWR(0x0d, struct drm_gem_mmap)
-#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0e, struct drm_gem_flink)
-#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0f, struct drm_gem_open)
-#define DRM_IOCTL_GEM_SET_DOMAIN DRM_IOW (0xb7, struct drm_gem_set_domain)
-
#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 0cd920de..5a94c156 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -121,7 +121,7 @@ int i915_dma_cleanup(struct drm_device * dev)
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
- if (dev->irq)
+ if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev_priv->ring.virtual_start) {
@@ -885,7 +885,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
- value = dev->irq ? 1 : 0;
+ value = dev->irq_enabled ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -1053,9 +1053,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
- DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
- dev_priv->hws_agpoffset);
DRM_DEBUG("load hws at %p\n", dev_priv->hws_vaddr);
+
return 0;
}
@@ -1089,6 +1088,11 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 445bb279..d9ce2484 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -183,7 +183,12 @@ typedef struct drm_i915_sarea {
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
-#define DRM_I915_GEM_LEAVEVT 0x20
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -211,6 +216,11 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
/* Asynchronous page flipping:
*/
@@ -428,6 +438,73 @@ struct drm_i915_gem_init {
uint64_t gtt_end;
};
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /** Returned pointer the data was mapped at */
+ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+};
+
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
@@ -473,20 +550,26 @@ struct drm_i915_gem_relocation_entry {
uint32_t write_domain;
};
-/**
+/** @{
* Intel memory domains
*
* Most of these just align with the various caches in
* the system and are used to flush and invalidate as
* objects end up cached in different domains.
*/
-
-/* 0x00000001 is DRM_GEM_DOMAIN_CPU */
-#define DRM_GEM_DOMAIN_I915_RENDER 0x00000002 /* Render cache, used by 2D and 3D drawing */
-#define DRM_GEM_DOMAIN_I915_SAMPLER 0x00000004 /* Sampler cache, used by texture engine */
-#define DRM_GEM_DOMAIN_I915_COMMAND 0x00000008 /* Command queue, used to load batch buffers */
-#define DRM_GEM_DOMAIN_I915_INSTRUCTION 0x00000010 /* Instruction cache, used by shader programs */
-#define DRM_GEM_DOMAIN_I915_VERTEX 0x00000020 /* Vertex address cache */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** @} */
struct drm_i915_gem_exec_object {
/**
@@ -494,11 +577,15 @@ struct drm_i915_gem_exec_object {
* operation.
*/
uint32_t handle;
-
- /** List of relocations to be performed on this buffer */
+
+ /** Number of relocations to be performed on this buffer */
uint32_t relocation_count;
- uint64_t relocs_ptr; /* struct drm_i915_gem_relocation_entry *relocs */
-
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ uint64_t relocs_ptr;
+
/** Required alignment in graphics aperture */
uint64_t alignment;
@@ -514,11 +601,13 @@ struct drm_i915_gem_execbuffer {
* List of buffers to be validated with their relocations to be
* performend on them.
*
+ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+ *
* These buffers must be listed in an order such that all relocations
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
- uint64_t buffers_ptr; /* struct drm_i915_gem_validate_entry *buffers */
+ uint64_t buffers_ptr;
uint32_t buffer_count;
/** Offset in the batchbuffer to start execution from. */
@@ -535,7 +624,7 @@ struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
uint32_t handle;
uint32_t pad;
-
+
/** alignment required within the aperture */
uint64_t alignment;
@@ -552,7 +641,7 @@ struct drm_i915_gem_unpin {
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
uint32_t handle;
-
+
/** Return busy status (1 if busy, 0 if idle) */
uint32_t busy;
};
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 06aa00ab..ee5e9dfd 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -39,7 +39,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080312"
+#define DRIVER_DATE "20080611"
#if defined(__linux__)
#define I915_HAVE_FENCE
@@ -63,7 +63,7 @@
*/
#define DRIVER_MAJOR 1
#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-#define DRIVER_MINOR 13
+#define DRIVER_MINOR 14
#else
#define DRIVER_MINOR 6
#endif
@@ -162,7 +162,7 @@ struct drm_i915_private {
void *agp_iomap;
unsigned int max_validate_buffers;
struct mutex cmdbuf_mutex;
- size_t stolen_base;
+ u32 stolen_base;
struct drm_i915_validate_buffer *val_bufs;
#endif
@@ -231,8 +231,7 @@ struct drm_i915_private {
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
- struct timer_list retire_timer;
- struct work_struct retire_task;
+ struct delayed_work retire_work;
uint32_t next_gem_seqno;
@@ -339,6 +338,13 @@ struct drm_i915_private {
u8 saveCR[37];
};
+struct drm_i915_file_private {
+ struct {
+ uint32_t last_gem_seqno;
+ uint32_t last_gem_throttle_seqno;
+ } mm;
+};
+
enum intel_chip_family {
CHIP_I8XX = 0x01,
CHIP_I9XX = 0x02,
@@ -418,8 +424,11 @@ extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *dev);
extern void i915_driver_lastclose(struct drm_device * dev);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@@ -461,7 +470,6 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_user_irq_on(struct drm_device *dev);
extern void i915_user_irq_off(struct drm_device *dev);
-extern void i915_user_interrupt_handler(struct work_struct *work);
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -503,6 +511,16 @@ int i915_execbuffer(struct drm_device *dev, void *data,
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
@@ -521,20 +539,13 @@ int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain);
-int i915_gem_flush_pwrite(struct drm_gem_object *obj,
- uint64_t offset, uint64_t size);
void i915_gem_lastclose(struct drm_device *dev);
void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_timeout(unsigned long data);
-void i915_gem_retire_handler(struct work_struct *work);
int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
+void i915_gem_retire_work_handler(struct work_struct *work);
#endif
extern unsigned int i915_fbpercrtc;
diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c
index 3f310770..3a652e8d 100644
--- a/shared-core/i915_init.c
+++ b/shared-core/i915_init.c
@@ -294,10 +294,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_I965G(dev) || IS_G33(dev))
dev_priv->cursor_needs_physical = false;
- if (IS_I9XX(dev)) {
+ if (IS_I9XX(dev))
pci_read_config_dword(dev->pdev, 0x5C, &dev_priv->stolen_base);
- DRM_DEBUG("stolen base %p\n", (void*)dev_priv->stolen_base);
- }
if (IS_I9XX(dev)) {
dev_priv->mmiobase = drm_get_resource_start(dev, 0);
@@ -329,13 +327,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
- dev_priv->mm.retire_timer.function = i915_gem_retire_timeout;
- dev_priv->mm.retire_timer.data = (unsigned long) dev;
- init_timer_deferrable (&dev_priv->mm.retire_timer);
- INIT_WORK(&dev_priv->mm.retire_task,
- i915_gem_retire_handler);
- INIT_WORK(&dev_priv->user_interrupt_task,
- i915_user_interrupt_handler);
+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
#ifdef __linux__
@@ -457,6 +450,32 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
master->driver_priv = NULL;
}
+int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv;
+
+ DRM_DEBUG("\n");
+ i915_file_priv = (struct drm_i915_file_private *)
+ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
+
+ if (!i915_file_priv)
+ return -ENOMEM;
+
+ file_priv->driver_priv = i915_file_priv;
+
+ i915_file_priv->mm.last_gem_seqno = 0;
+ i915_file_priv->mm.last_gem_throttle_seqno = 0;
+
+ return 0;
+}
+
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+}
+
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index bd11d37a..0dea6e56 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -35,6 +35,13 @@
#define MAX_NOPID ((u32)~0)
+/*
+ * These are the interrupts used by the driver
+ */
+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
/**
* i915_get_pipe - return the the pipe associated with a given plane
* @dev: DRM device
@@ -493,28 +500,13 @@ static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
return 0;
}
-void
-i915_user_interrupt_handler(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv;
- struct drm_device *dev;
-
- dev_priv = container_of(work, struct drm_i915_private,
- user_interrupt_task);
- dev = dev_priv->dev;
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev);
- mutex_unlock(&dev->struct_mutex);
-}
-
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
struct drm_i915_master_private *master_priv;
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
u32 iir;
- u32 pipea_stats = 0, pipeb_stats, tvdac;
+ u32 pipea_stats = 0, pipeb_stats = 0, tvdac;
int hotplug = 0;
int vblank = 0;
@@ -524,22 +516,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
else
iir = I915_READ16(IIR);
- iir &= (dev_priv->irq_mask_reg | I915_USER_INTERRUPT);
+ if (dev->pdev->msi_enabled)
+ I915_WRITE(IER, 0);
-#if 0
- DRM_DEBUG("flag=%08x\n", iir);
-#endif
- if (iir == 0) {
-#if 0
- DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
- iir,
- I915_READ(IMR),
- I915_READ(IER),
- I915_READ(PIPEASTAT),
- I915_READ(PIPEBSTAT));
-#endif
+ if (!iir)
return IRQ_NONE;
- }
/*
* Clear the PIPE(A|B)STAT regs before the IIR otherwise
@@ -598,10 +579,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
i915_fence_handler(dev);
- schedule_work(&dev_priv->user_interrupt_task);
#endif
}
+ if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
+ I915_VBLANK_INTERRUPT_STATUS)) {
+ vblank = 1;
+ drm_handle_vblank(dev, i915_get_plane(dev, 0));
+ }
+ if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
+ I915_VBLANK_INTERRUPT_STATUS)) {
+ vblank = 1;
+ drm_handle_vblank(dev, i915_get_plane(dev, 1));
+ }
if (vblank) {
if (dev_priv->swaps_pending > 0)
drm_locked_tasklet(dev, i915_vblank_tasklet);
@@ -626,6 +616,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
i915_run_hotplug_tasklet(dev, temp2);
}
+ if (dev->pdev->msi_enabled)
+ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+
return IRQ_HANDLED;
}
@@ -697,8 +690,17 @@ int i915_wait_irq(struct drm_device * dev, int irq_nr)
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
- if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+ master_priv = dev->primary->master->driver_priv;
+
+ if (!master_priv) {
+ DRM_ERROR("no master priv?\n");
+ return -EINVAL;
+ }
+
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return 0;
+ }
i915_user_irq_on(dev);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
@@ -710,10 +712,8 @@ int i915_wait_irq(struct drm_device * dev, int irq_nr)
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
- if (dev->primary->master) {
- master_priv = dev->primary->master->driver_priv;
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr)
master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- }
return ret;
}
diff --git a/tests/gem_basic.c b/tests/gem_basic.c
index 8b8b63d0..b2176fba 100644
--- a/tests/gem_basic.c
+++ b/tests/gem_basic.c
@@ -34,6 +34,7 @@
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
+#include "i915_drm.h"
static void
test_bad_close(int fd)
@@ -52,7 +53,7 @@ test_bad_close(int fd)
static void
test_create_close(int fd)
{
- struct drm_gem_create create;
+ struct drm_i915_gem_create create;
struct drm_gem_close close;
int ret;
@@ -60,7 +61,7 @@ test_create_close(int fd)
memset(&create, 0, sizeof(create));
create.size = 16 * 1024;
- ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
close.handle = create.handle;
@@ -70,14 +71,14 @@ test_create_close(int fd)
static void
test_create_fd_close(int fd)
{
- struct drm_gem_create create;
+ struct drm_i915_gem_create create;
int ret;
printf("Testing closing with an object allocated.\n");
memset(&create, 0, sizeof(create));
create.size = 16 * 1024;
- ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
close(fd);
diff --git a/tests/gem_mmap.c b/tests/gem_mmap.c
index 3f8e27a0..c3a51883 100644
--- a/tests/gem_mmap.c
+++ b/tests/gem_mmap.c
@@ -34,12 +34,13 @@
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
+#include "i915_drm.h"
#define OBJECT_SIZE 16384
int do_read(int fd, int handle, void *buf, int offset, int size)
{
- struct drm_gem_pread read;
+ struct drm_i915_gem_pread read;
/* Ensure that we don't have any convenient data in buf in case
* we fail.
@@ -52,12 +53,12 @@ int do_read(int fd, int handle, void *buf, int offset, int size)
read.size = size;
read.offset = offset;
- return ioctl(fd, DRM_IOCTL_GEM_PREAD, &read);
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
}
int do_write(int fd, int handle, void *buf, int offset, int size)
{
- struct drm_gem_pwrite write;
+ struct drm_i915_gem_pwrite write;
memset(&write, 0, sizeof(write));
write.handle = handle;
@@ -65,14 +66,14 @@ int do_write(int fd, int handle, void *buf, int offset, int size)
write.size = size;
write.offset = offset;
- return ioctl(fd, DRM_IOCTL_GEM_PWRITE, &write);
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
}
int main(int argc, char **argv)
{
int fd;
- struct drm_gem_create create;
- struct drm_gem_mmap mmap;
+ struct drm_i915_gem_create create;
+ struct drm_i915_gem_mmap mmap;
struct drm_gem_close unref;
uint8_t expected[OBJECT_SIZE];
uint8_t buf[OBJECT_SIZE];
@@ -87,12 +88,12 @@ int main(int argc, char **argv)
mmap.offset = 0;
mmap.size = 4096;
printf("Testing mmaping of bad object.\n");
- ret = ioctl(fd, DRM_IOCTL_GEM_MMAP, &mmap);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
assert(ret == -1 && errno == EINVAL);
memset(&create, 0, sizeof(create));
create.size = OBJECT_SIZE;
- ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
handle = create.handle;
@@ -100,7 +101,7 @@ int main(int argc, char **argv)
mmap.handle = handle;
mmap.offset = 0;
mmap.size = OBJECT_SIZE;
- ret = ioctl(fd, DRM_IOCTL_GEM_MMAP, &mmap);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
assert(ret == 0);
addr = (uint8_t *)(uintptr_t)mmap.addr_ptr;
diff --git a/tests/gem_readwrite.c b/tests/gem_readwrite.c
index a48f9847..54b25ea3 100644
--- a/tests/gem_readwrite.c
+++ b/tests/gem_readwrite.c
@@ -34,12 +34,13 @@
#include <errno.h>
#include <sys/stat.h>
#include "drm.h"
+#include "i915_drm.h"
#define OBJECT_SIZE 16384
int do_read(int fd, int handle, void *buf, int offset, int size)
{
- struct drm_gem_pread read;
+ struct drm_i915_gem_pread read;
/* Ensure that we don't have any convenient data in buf in case
* we fail.
@@ -52,12 +53,12 @@ int do_read(int fd, int handle, void *buf, int offset, int size)
read.size = size;
read.offset = offset;
- return ioctl(fd, DRM_IOCTL_GEM_PREAD, &read);
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
}
int do_write(int fd, int handle, void *buf, int offset, int size)
{
- struct drm_gem_pwrite write;
+ struct drm_i915_gem_pwrite write;
memset(&write, 0, sizeof(write));
write.handle = handle;
@@ -65,13 +66,13 @@ int do_write(int fd, int handle, void *buf, int offset, int size)
write.size = size;
write.offset = offset;
- return ioctl(fd, DRM_IOCTL_GEM_PWRITE, &write);
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
}
int main(int argc, char **argv)
{
int fd;
- struct drm_gem_create create;
+ struct drm_i915_gem_create create;
uint8_t expected[OBJECT_SIZE];
uint8_t buf[OBJECT_SIZE];
int ret;
@@ -81,7 +82,7 @@ int main(int argc, char **argv)
memset(&create, 0, sizeof(create));
create.size = OBJECT_SIZE;
- ret = ioctl(fd, DRM_IOCTL_GEM_CREATE, &create);
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
assert(ret == 0);
handle = create.handle;