-rw-r--r--  .gitignore                                                    |    3
-rw-r--r--  configure.ac                                                  |   70
-rw-r--r--  libdrm/Makefile.am                                            |    8
-rw-r--r--  libdrm/dri_bufmgr.c                                           |  141
-rw-r--r--  libdrm/dri_bufmgr.h                                           |  174
-rw-r--r--  libdrm/intel/Makefile.am                                      |   38
-rw-r--r--  libdrm/intel/intel_bufmgr.h                                   |   95
-rw-r--r--  libdrm/intel/intel_bufmgr_fake.c                              | 1218
-rw-r--r--  libdrm/intel/intel_bufmgr_gem.c                               |  853
-rw-r--r--  libdrm/intel/mm.c                                             |  281
-rw-r--r--  libdrm/intel/mm.h                                             |   96
-rw-r--r--  libdrm/xf86drm.c                                              |  174
-rw-r--r--  libdrm/xf86drm.h                                              |    1
-rw-r--r--  libdrm/xf86mm.h                                               |   12
-rw-r--r--  linux-core/Makefile                                           |    6
-rw-r--r--  linux-core/Makefile.kernel                                    |    7
-rw-r--r--  linux-core/drm-gem.txt                                        |  805
-rw-r--r--  linux-core/drmP.h                                             |  207
-rw-r--r--  linux-core/drm_agpsupport.c                                   |   43
-rw-r--r--  linux-core/drm_bo.c                                           |   38
-rw-r--r--  linux-core/drm_bo_move.c                                      |    2
-rw-r--r--  linux-core/drm_compat.h                                       |    2
-rw-r--r--  linux-core/drm_drv.c                                          |    8
-rw-r--r--  linux-core/drm_fops.c                                         |    6
-rw-r--r--  linux-core/drm_gem.c                                          |  420
-rw-r--r--  linux-core/drm_irq.c                                          |   21
-rw-r--r--  linux-core/drm_lock.c                                         |   21
-rw-r--r--  linux-core/drm_memory.c                                       |    2
-rw-r--r--  linux-core/drm_memrange.c (renamed from linux-core/drm_mm.c)  |   84
-rw-r--r--  linux-core/drm_objects.h                                      |   13
-rw-r--r--  linux-core/drm_proc.c                                         |   84
-rw-r--r--  linux-core/drm_sman.c                                         |   22
-rw-r--r--  linux-core/drm_sman.h                                         |    4
-rw-r--r--  linux-core/drm_stub.c                                         |   37
-rw-r--r--  linux-core/i915_drv.c                                         |   35
-rw-r--r--  linux-core/i915_gem.c                                         | 2501
-rw-r--r--  linux-core/i915_gem_proc.c                                    |  293
-rw-r--r--  linux-core/i915_gem_tiling.c                                  |  305
-rw-r--r--  linux-core/i915_opregion.c                                    |   19
-rw-r--r--  linux-core/nouveau_bo.c                                       |    2
-rw-r--r--  linux-core/nouveau_sgdma.c                                    |    2
-rw-r--r--  shared-core/drm.h                                             |   31
-rw-r--r--  shared-core/i915_dma.c                                        |  259
-rw-r--r--  shared-core/i915_drm.h                                        |  320
-rw-r--r--  shared-core/i915_drv.h                                        |  327
-rw-r--r--  shared-core/i915_irq.c                                        |  102
-rw-r--r--  tests/Makefile.am                                             |    5
-rw-r--r--  tests/drmtest.c                                               |    2
-rw-r--r--  tests/gem_basic.c                                             |   98
-rw-r--r--  tests/gem_mmap.c                                              |  132
-rw-r--r--  tests/gem_readwrite.c                                         |  135
51 files changed, 9187 insertions, 377 deletions
diff --git a/.gitignore b/.gitignore
index 0991da8c..c8a22ea3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -58,6 +58,9 @@ tests/getclient
tests/getstats
tests/getversion
tests/lock
+tests/gem_basic
+tests/gem_mmap
+tests/gem_readwrite
tests/openclose
tests/setversion
tests/updatedraw
diff --git a/configure.ac b/configure.ac
index 78203343..1cf877d5 100644
--- a/configure.ac
+++ b/configure.ac
@@ -19,7 +19,7 @@
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
AC_PREREQ(2.57)
-AC_INIT([libdrm], 2.3.1, [dri-devel@lists.sourceforge.net], libdrm)
+AC_INIT([libdrm], 2.4.0, [dri-devel@lists.sourceforge.net], libdrm)
AC_CONFIG_SRCDIR([Makefile.am])
AM_INIT_AUTOMAKE([dist-bzip2])
@@ -35,9 +35,77 @@ AC_SYS_LARGEFILE
pkgconfigdir=${libdir}/pkgconfig
AC_SUBST(pkgconfigdir)
+
+dnl ===========================================================================
+dnl check compiler flags
+AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
+ AC_MSG_CHECKING([whether $CC supports $1])
+
+ libdrm_save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $1"
+
+ AC_COMPILE_IFELSE([ ], [libdrm_cc_flag=yes], [libdrm_cc_flag=no])
+ CFLAGS="$libdrm_save_CFLAGS"
+
+ if test "x$libdrm_cc_flag" = "xyes"; then
+ ifelse([$2], , :, [$2])
+ else
+ ifelse([$3], , :, [$3])
+ fi
+ AC_MSG_RESULT([$libdrm_cc_flag])
+])
+
+dnl Use lots of warning flags with gcc and compatible compilers
+
+dnl Note: if you change the following variable, the cache is automatically
+dnl skipped and all flags rechecked. So there's no need to do anything
+dnl else. If for any reason you need to force a recheck, just change
+dnl MAYBE_WARN in an ignorable way (like adding whitespace)
+
+MAYBE_WARN="-Wall -Wextra \
+-Wsign-compare -Werror-implicit-function-declaration \
+-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
+-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
+-Wpacked -Wswitch-enum -Wmissing-format-attribute \
+-Wstrict-aliasing=2 -Winit-self -Wunsafe-loop-optimizations \
+-Wdeclaration-after-statement -Wold-style-definition \
+-Wno-missing-field-initializers -Wno-unused-parameter \
+-Wno-attributes -Wno-long-long -Winline"
+
+# invalidate cached value if MAYBE_WARN has changed
+if test "x$libdrm_cv_warn_maybe" != "x$MAYBE_WARN"; then
+ unset libdrm_cv_warn_cflags
+fi
+AC_CACHE_CHECK([for supported warning flags], libdrm_cv_warn_cflags, [
+ echo
+ WARN_CFLAGS=""
+
+ # Some warning options are not supported by all versions of
+ # gcc, so test all desired options against the current
+ # compiler.
+ #
+ # Note that there are some order dependencies
+ # here. Specifically, an option that disables a warning will
+ # have no net effect if a later option then enables that
+	# warning (perhaps implicitly). So we put some grouped
+ # options (-Wall and -Wextra) up front and the -Wno options
+ # last.
+
+ for W in $MAYBE_WARN; do
+ LIBDRM_CC_TRY_FLAG([$W], [WARN_CFLAGS="$WARN_CFLAGS $W"])
+ done
+
+ libdrm_cv_warn_cflags=$WARN_CFLAGS
+ libdrm_cv_warn_maybe=$MAYBE_WARN
+
+ AC_MSG_CHECKING([which warning flags were supported])])
+WARN_CFLAGS="$libdrm_cv_warn_cflags"
+
+AC_SUBST(WARN_CFLAGS)
AC_OUTPUT([
Makefile
libdrm/Makefile
+ libdrm/intel/Makefile
shared-core/Makefile
tests/Makefile
libdrm.pc])
diff --git a/libdrm/Makefile.am b/libdrm/Makefile.am
index e7e07e47..1187517a 100644
--- a/libdrm/Makefile.am
+++ b/libdrm/Makefile.am
@@ -18,14 +18,18 @@
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+SUBDIRS = intel
+
libdrm_la_LTLIBRARIES = libdrm.la
libdrm_ladir = $(libdir)
libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined
AM_CFLAGS = -I$(top_srcdir)/shared-core
-libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c
+libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \
+ dri_bufmgr.c
+libdrm_la_LIBADD = intel/libdrm_intel.la
libdrmincludedir = ${includedir}
-libdrminclude_HEADERS = xf86drm.h xf86mm.h
+libdrminclude_HEADERS = xf86drm.h xf86mm.h dri_bufmgr.h
EXTRA_DIST = ChangeLog TODO
diff --git a/libdrm/dri_bufmgr.c b/libdrm/dri_bufmgr.c
new file mode 100644
index 00000000..7657df61
--- /dev/null
+++ b/libdrm/dri_bufmgr.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "dri_bufmgr.h"
+
+/** @file dri_bufmgr.c
+ *
+ * Convenience functions for buffer management methods.
+ */
+
+dri_bo *
+dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
+ unsigned int alignment)
+{
+ return bufmgr->bo_alloc(bufmgr, name, size, alignment);
+}
+
+void
+dri_bo_reference(dri_bo *bo)
+{
+ bo->bufmgr->bo_reference(bo);
+}
+
+void
+dri_bo_unreference(dri_bo *bo)
+{
+ if (bo == NULL)
+ return;
+
+ bo->bufmgr->bo_unreference(bo);
+}
+
+int
+dri_bo_map(dri_bo *buf, int write_enable)
+{
+ return buf->bufmgr->bo_map(buf, write_enable);
+}
+
+int
+dri_bo_unmap(dri_bo *buf)
+{
+ return buf->bufmgr->bo_unmap(buf);
+}
+
+int
+dri_bo_subdata(dri_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ int ret;
+ if (bo->bufmgr->bo_subdata)
+ return bo->bufmgr->bo_subdata(bo, offset, size, data);
+ if (size == 0 || data == NULL)
+ return 0;
+
+ ret = dri_bo_map(bo, 1);
+ if (ret)
+ return ret;
+ memcpy((unsigned char *)bo->virtual + offset, data, size);
+ dri_bo_unmap(bo);
+ return 0;
+}
+
+int
+dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
+{
+ int ret;
+   if (bo->bufmgr->bo_get_subdata)
+      return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
+
+ if (size == 0 || data == NULL)
+ return 0;
+
+ ret = dri_bo_map(bo, 0);
+ if (ret)
+ return ret;
+ memcpy(data, (unsigned char *)bo->virtual + offset, size);
+ dri_bo_unmap(bo);
+ return 0;
+}
+
+void
+dri_bo_wait_rendering(dri_bo *bo)
+{
+ bo->bufmgr->bo_wait_rendering(bo);
+}
+
+void
+dri_bufmgr_destroy(dri_bufmgr *bufmgr)
+{
+ bufmgr->destroy(bufmgr);
+}
+
+void *dri_process_relocs(dri_bo *batch_buf)
+{
+ return batch_buf->bufmgr->process_relocs(batch_buf);
+}
+
+void dri_post_submit(dri_bo *batch_buf)
+{
+ batch_buf->bufmgr->post_submit(batch_buf);
+}
+
+void
+dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug)
+{
+ bufmgr->debug = enable_debug;
+}
+
+int
+dri_bufmgr_check_aperture_space(dri_bo *bo)
+{
+ return bo->bufmgr->check_aperture_space(bo);
+}
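The two subdata wrappers above illustrate the optional-hook pattern used throughout this file: a backend may supply a fast path, and the wrapper otherwise falls back to map/memcpy/unmap. A minimal client-side sketch of the equivalent upload, using only the public API (upload_verts is a hypothetical helper, not part of this commit):

    /* What dri_bo_subdata() amounts to when no backend hook is present. */
    static int upload_verts(dri_bo *bo, const float *verts, size_t nbytes)
    {
        int ret = dri_bo_map(bo, 1);         /* map writable */
        if (ret)
            return ret;
        memcpy(bo->virtual, verts, nbytes);  /* bo->virtual valid while mapped */
        dri_bo_unmap(bo);
        return 0;
    }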
diff --git a/libdrm/dri_bufmgr.h b/libdrm/dri_bufmgr.h
new file mode 100644
index 00000000..a5ae6c0f
--- /dev/null
+++ b/libdrm/dri_bufmgr.h
@@ -0,0 +1,174 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Eric Anholt <eric@anholt.net>
+ */
+
+#ifndef _DRI_BUFMGR_H_
+#define _DRI_BUFMGR_H_
+#include <xf86drm.h>
+
+typedef struct _dri_bufmgr dri_bufmgr;
+typedef struct _dri_bo dri_bo;
+
+struct _dri_bo {
+ /**
+ * Size in bytes of the buffer object.
+ *
+ * The size may be larger than the size originally requested for the
+ * allocation, such as being aligned to page size.
+ */
+ unsigned long size;
+ /**
+ * Card virtual address (offset from the beginning of the aperture) for the
+ * object. Only valid while validated.
+ */
+ unsigned long offset;
+ /**
+ * Virtual address for accessing the buffer data. Only valid while mapped.
+ */
+ void *virtual;
+ /** Buffer manager context associated with this buffer object */
+ dri_bufmgr *bufmgr;
+};
+
+/**
+ * Context for a buffer manager instance.
+ *
+ * Contains public methods followed by private storage for the buffer manager.
+ */
+struct _dri_bufmgr {
+ /**
+ * Allocate a buffer object.
+ *
+ * Buffer objects are not necessarily initially mapped into CPU virtual
+ * address space or graphics device aperture. They must be mapped using
+ * bo_map() to be used by the CPU, and validated for use using bo_validate()
+ * to be used from the graphics device.
+ */
+ dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
+ unsigned long size, unsigned int alignment);
+
+ /** Takes a reference on a buffer object */
+ void (*bo_reference)(dri_bo *bo);
+
+ /**
+    * Releases a reference on a buffer object, freeing the data if no
+    * references remain.
+ */
+ void (*bo_unreference)(dri_bo *bo);
+
+ /**
+ * Maps the buffer into userspace.
+ *
+ * This function will block waiting for any existing execution on the
+ * buffer to complete, first. The resulting mapping is available at
+ * buf->virtual.
+ */
+ int (*bo_map)(dri_bo *buf, int write_enable);
+
+ /** Reduces the refcount on the userspace mapping of the buffer object. */
+ int (*bo_unmap)(dri_bo *buf);
+
+ /**
+ * Write data into an object.
+ *
+    * This is an optional function; if missing, dri_bo_subdata() will
+    * fall back to map/memcpy/unmap.
+ */
+ int (*bo_subdata) (dri_bo *buf, unsigned long offset,
+ unsigned long size, const void *data);
+
+ /**
+ * Read data from an object
+ *
+    * This is an optional function; if missing, dri_bo_get_subdata() will
+    * fall back to map/memcpy/unmap.
+ */
+ int (*bo_get_subdata) (dri_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+
+ /**
+    * Waits for rendering to the object by the GPU to complete.
+ *
+ * This is not required for any access to the BO by bo_map, bo_subdata, etc.
+ * It is merely a way for the driver to implement glFinish.
+ */
+ void (*bo_wait_rendering) (dri_bo *bo);
+
+ /**
+ * Tears down the buffer manager instance.
+ */
+ void (*destroy)(dri_bufmgr *bufmgr);
+
+ /**
+ * Processes the relocations, either in userland or by converting the list
+ * for use in batchbuffer submission.
+ *
+ * Kernel-based implementations will return a pointer to the arguments
+ * to be handed with batchbuffer submission to the kernel. The userland
+    * implementation performs the buffer validation and emits relocations
+    * into the buffers in the appropriate order.
+ *
+ * \param batch_buf buffer at the root of the tree of relocations
+ * \return argument to be completed and passed to the execbuffers ioctl
+ * (if any).
+ */
+ void *(*process_relocs)(dri_bo *batch_buf);
+
+   /** Performs cleanup of the relocation state after batch submission. */
+   void (*post_submit)(dri_bo *batch_buf);
+
+   /** Checks that there is aperture space available for this buffer. */
+   int (*check_aperture_space)(dri_bo *bo);
+ int debug; /**< Enables verbose debugging printouts */
+};
+
+dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
+ unsigned int alignment);
+void dri_bo_reference(dri_bo *bo);
+void dri_bo_unreference(dri_bo *bo);
+int dri_bo_map(dri_bo *buf, int write_enable);
+int dri_bo_unmap(dri_bo *buf);
+
+int dri_bo_subdata(dri_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+void dri_bo_wait_rendering(dri_bo *bo);
+
+void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
+void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
+
+void *dri_process_relocs(dri_bo *batch_buf);
+void dri_post_process_relocs(dri_bo *batch_buf);
+void dri_post_submit(dri_bo *batch_buf);
+int dri_bufmgr_check_aperture_space(dri_bo *bo);
+
+#endif
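To make the lifecycle of the API above concrete, here is a minimal sketch of a client allocating, filling, and releasing a buffer. The bufmgr itself comes from a backend constructor (for example intel_bufmgr_gem_init() below); the buffer name, size, and alignment are hypothetical:

    /* Hypothetical sketch: allocate, upload, wait, release. */
    void scratch_example(dri_bufmgr *bufmgr, const void *pixels, size_t sz)
    {
        dri_bo *bo = dri_bo_alloc(bufmgr, "scratch", sz, 4096);

        dri_bo_subdata(bo, 0, sz, pixels);   /* CPU upload */
        /* ... emit rendering that references bo, then submit ... */
        dri_bo_wait_rendering(bo);           /* glFinish-style wait */
        dri_bo_unreference(bo);              /* drop the local reference */
    }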
diff --git a/libdrm/intel/Makefile.am b/libdrm/intel/Makefile.am
new file mode 100644
index 00000000..111204b1
--- /dev/null
+++ b/libdrm/intel/Makefile.am
@@ -0,0 +1,38 @@
+# Copyright © 2008 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+# Eric Anholt <eric@anholt.net>
+
+AM_CFLAGS = \
+ $(WARN_CFLAGS) \
+ -I$(top_srcdir)/shared-core
+
+noinst_LTLIBRARIES = libdrm_intel.la
+
+libdrm_intel_la_SOURCES = \
+ intel_bufmgr_fake.c \
+ intel_bufmgr_gem.c \
+ mm.c \
+ mm.h
+
+libdrm_intelincludedir = ${includedir}
+libdrm_intelinclude_HEADERS = intel_bufmgr.h
diff --git a/libdrm/intel/intel_bufmgr.h b/libdrm/intel/intel_bufmgr.h
new file mode 100644
index 00000000..1cf0d518
--- /dev/null
+++ b/libdrm/intel/intel_bufmgr.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/**
+ * @file intel_bufmgr.h
+ *
+ * Public definitions of Intel-specific bufmgr functions.
+ */
+
+#ifndef INTEL_BUFMGR_GEM_H
+#define INTEL_BUFMGR_GEM_H
+
+#include "dri_bufmgr.h"
+
+/**
+ * Intel-specific bufmgr bits that follow immediately after the
+ * generic bufmgr structure.
+ */
+struct intel_bufmgr {
+ /**
+    * Add a relocation entry in reloc_buf, which will be updated with the
+    * target buffer's real offset on command submission.
+ *
+ * Relocations remain in place for the lifetime of the buffer object.
+ *
+ * \param reloc_buf Buffer to write the relocation into.
+ * \param read_domains GEM read domains which the buffer will be read into
+ * by the command that this relocation is part of.
+    * \param write_domain GEM write domain which the buffer will be dirtied
+    *        in by the command that this relocation is part of.
+    * \param delta Constant value to be added to the relocation target's
+    *        offset.
+    * \param offset Byte offset within reloc_buf of the relocated pointer.
+ * \param target Buffer whose offset should be written into the relocation
+ * entry.
+ */
+ int (*emit_reloc)(dri_bo *reloc_buf,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta, uint32_t offset, dri_bo *target);
+};
+
+/* intel_bufmgr_gem.c */
+dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
+dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
+ unsigned int handle);
+void intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr);
+
+/* intel_bufmgr_fake.c */
+dri_bufmgr *intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
+ unsigned long size,
+ unsigned int (*fence_emit)(void *private),
+ int (*fence_wait)(void *private,
+ unsigned int cookie),
+ void *driver_priv);
+dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
+ unsigned long offset, unsigned long size,
+ void *virtual);
+
+void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
+void intel_bo_fake_disable_backing_store(dri_bo *bo,
+ void (*invalidate_cb)(dri_bo *bo,
+ void *ptr),
+ void *ptr);
+void intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr);
+
+int intel_bo_emit_reloc(dri_bo *reloc_buf,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta, uint32_t offset, dri_bo *target_buf);
+
+#endif /* INTEL_BUFMGR_GEM_H */
+
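A rough illustration of how the emit_reloc() hook is driven while a driver builds a batchbuffer; the read domain and offsets below are hypothetical, and a real driver derives the byte offset from its batch write pointer:

    /* Sketch: record that the dword at byte offset batch_used within
     * batch must hold target->offset once the kernel binds the buffers. */
    static void emit_pointer_to(dri_bo *batch, uint32_t batch_used,
                                dri_bo *target)
    {
        intel_bo_emit_reloc(batch,
                            I915_GEM_DOMAIN_RENDER, /* read_domains */
                            0,                      /* write_domain: read-only use */
                            0,                      /* delta */
                            batch_used,             /* offset within batch */
                            target);
    }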
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
new file mode 100644
index 00000000..e988eb58
--- /dev/null
+++ b/libdrm/intel/intel_bufmgr_fake.c
@@ -0,0 +1,1218 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Originally a fake version of the buffer manager so that we could
+ * prototype the changes in a driver fairly quickly; it has since been
+ * fleshed out into a fully functional interim solution.
+ *
+ * Basically wraps the old style memory management in the new
+ * programming interface, but is more expressive and avoids many of
+ * the bugs in the old texture manager.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include "dri_bufmgr.h"
+#include "intel_bufmgr.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "mm.h"
+
+#define DBG(...) do { \
+ if (bufmgr_fake->bufmgr.debug) \
+ drmMsg(__VA_ARGS__); \
+} while (0)
+
+/* Internal flags:
+ */
+#define BM_NO_BACKING_STORE 0x00000001
+#define BM_NO_FENCE_SUBDATA 0x00000002
+#define BM_PINNED 0x00000004
+
+/* Wrapper around mm.c's mem_block, which understands that you must
+ * wait for fences to expire before memory can be freed. This is
+ * specific to our use of memcpy for uploads - an upload that was
+ * processed through the command queue wouldn't need to care about
+ * fences.
+ */
+#define MAX_RELOCS 4096
+
+struct fake_buffer_reloc
+{
+ /** Buffer object that the relocation points at. */
+ dri_bo *target_buf;
+ /** Offset of the relocation entry within reloc_buf. */
+ uint32_t offset;
+ /** Cached value of the offset when we last performed this relocation. */
+ uint32_t last_target_offset;
+ /** Value added to target_buf's offset to get the relocation entry. */
+ uint32_t delta;
+ /** Cache domains the target buffer is read into. */
+ uint32_t read_domains;
+ /** Cache domain the target buffer will have dirty cachelines in. */
+ uint32_t write_domain;
+};
+
+struct block {
+ struct block *next, *prev;
+ struct mem_block *mem; /* BM_MEM_AGP */
+
+ /**
+ * Marks that the block is currently in the aperture and has yet to be
+ * fenced.
+ */
+ unsigned on_hardware:1;
+ /**
+ * Marks that the block is currently fenced (being used by rendering) and
+ * can't be freed until @fence is passed.
+ */
+ unsigned fenced:1;
+
+ /** Fence cookie for the block. */
+ unsigned fence; /* Split to read_fence, write_fence */
+
+ dri_bo *bo;
+ void *virtual;
+};
+
+typedef struct _bufmgr_fake {
+ dri_bufmgr bufmgr;
+ struct intel_bufmgr intel_bufmgr;
+
+ unsigned long low_offset;
+ unsigned long size;
+ void *virtual;
+
+ struct mem_block *heap;
+
+ unsigned buf_nr; /* for generating ids */
+
+ /**
+ * List of blocks which are currently in the GART but haven't been
+ * fenced yet.
+ */
+ struct block on_hardware;
+ /**
+ * List of blocks which are in the GART and have an active fence on them.
+ */
+ struct block fenced;
+ /**
+ * List of blocks which have an expired fence and are ready to be evicted.
+ */
+ struct block lru;
+
+ unsigned int last_fence;
+
+ unsigned fail:1;
+ unsigned need_fence:1;
+ int thrashing;
+
+ /**
+ * Driver callback to emit a fence, returning the cookie.
+ *
+ * Currently, this also requires that a write flush be emitted before
+ * emitting the fence, but this should change.
+ */
+ unsigned int (*fence_emit)(void *private);
+ /** Driver callback to wait for a fence cookie to have passed. */
+ int (*fence_wait)(void *private, unsigned int fence_cookie);
+ /** Driver-supplied argument to driver callbacks */
+ void *driver_priv;
+
+ int debug;
+
+ int performed_rendering;
+
+ /* keep track of the current total size of objects we have relocs for */
+ unsigned long current_total_size;
+} dri_bufmgr_fake;
+
+typedef struct _dri_bo_fake {
+ dri_bo bo;
+
+ unsigned id; /* debug only */
+ const char *name;
+
+ unsigned dirty:1;
+   unsigned size_accounted:1; /* this buffer's size has been accounted against the aperture */
+   unsigned card_dirty:1; /* has the card written to this buffer - we may need to copy it back */
+ unsigned int refcount;
+ /* Flags may consist of any of the DRM_BO flags, plus
+ * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two
+ * driver private flags.
+ */
+ uint64_t flags;
+ /** Cache domains the target buffer is read into. */
+ uint32_t read_domains;
+ /** Cache domain the target buffer will have dirty cachelines in. */
+ uint32_t write_domain;
+
+ unsigned int alignment;
+ int is_static, validated;
+ unsigned int map_count;
+
+ /** relocation list */
+ struct fake_buffer_reloc *relocs;
+ int nr_relocs;
+
+ struct block *block;
+ void *backing_store;
+ void (*invalidate_cb)(dri_bo *bo, void *ptr);
+ void *invalidate_ptr;
+} dri_bo_fake;
+
+static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
+ unsigned int fence_cookie);
+
+static int dri_fake_check_aperture_space(dri_bo *bo);
+
+#define MAXFENCE 0x7fffffff
+
+static int FENCE_LTE( unsigned a, unsigned b )
+{
+ if (a == b)
+ return 1;
+
+ if (a < b && b - a < (1<<24))
+ return 1;
+
+ if (a > b && MAXFENCE - a + b < (1<<24))
+ return 1;
+
+ return 0;
+}
+
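FENCE_LTE() above compares fence cookies modulo wrap-around of the 31-bit counter; a worked example with hypothetical cookie values:

    FENCE_LTE(10, 20)          -> 1   (a < b and b - a = 10 < 1<<24)
    FENCE_LTE(MAXFENCE - 5, 3) -> 1   (wrapped: MAXFENCE - a + b = 8 < 1<<24)
    FENCE_LTE(20, 10)          -> 0   (b is older than a)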
+static unsigned int
+_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
+{
+ bufmgr_fake->last_fence = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
+ return bufmgr_fake->last_fence;
+}
+
+static void
+_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie)
+{
+ int ret;
+
+ ret = bufmgr_fake->fence_wait(bufmgr_fake->driver_priv, cookie);
+ if (ret != 0) {
+      drmMsg("%s:%d: Error %d waiting for fence.\n", __FILE__, __LINE__, ret);
+ abort();
+ }
+ clear_fenced(bufmgr_fake, cookie);
+}
+
+static int
+_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+ /* Slight problem with wrap-around:
+ */
+ return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
+}
+
+/**
+ * Allocate a memory manager block for the buffer.
+ */
+static int
+alloc_block(dri_bo *bo)
+{
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ dri_bufmgr_fake *bufmgr_fake= (dri_bufmgr_fake *)bo->bufmgr;
+   struct block *block = (struct block *)calloc(1, sizeof(*block));
+   unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
+   unsigned int sz;
+
+   if (!block)
+      return 0;
+
+ sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
+
+ block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
+ if (!block->mem) {
+ free(block);
+ return 0;
+ }
+
+ DRMINITLISTHEAD(block);
+
+ /* Insert at head or at tail???
+ */
+ DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+
+ block->virtual = (uint8_t *)bufmgr_fake->virtual +
+ block->mem->ofs - bufmgr_fake->low_offset;
+ block->bo = bo;
+
+ bo_fake->block = block;
+
+ return 1;
+}
+
+/* Release the card storage associated with buf:
+ */
+static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
+{
+   dri_bo_fake *bo_fake;
+
+   if (!block)
+      return;
+
+   DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
+       block->on_hardware, block->fenced);
+
+   bo_fake = (dri_bo_fake *)block->bo;
+   /* Copy the card's contents back to backing store before freeing. */
+   if (!(bo_fake->flags & BM_NO_BACKING_STORE) && (bo_fake->card_dirty == 1)) {
+      memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
+      bo_fake->card_dirty = 0;
+      bo_fake->dirty = 1;
+   }
+
+ if (block->on_hardware) {
+ block->bo = NULL;
+ }
+ else if (block->fenced) {
+ block->bo = NULL;
+ }
+ else {
+ DBG(" - free immediately\n");
+ DRMLISTDEL(block);
+
+ mmFreeMem(block->mem);
+ free(block);
+ }
+}
+
+static void
+alloc_backing_store(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ assert(!bo_fake->backing_store);
+ assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
+
+ bo_fake->backing_store = malloc(bo->size);
+
+ DBG("alloc_backing - buf %d %p %d\n", bo_fake->id, bo_fake->backing_store, bo->size);
+ assert(bo_fake->backing_store);
+}
+
+static void
+free_backing_store(dri_bo *bo)
+{
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ if (bo_fake->backing_store) {
+ assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
+ free(bo_fake->backing_store);
+ bo_fake->backing_store = NULL;
+ }
+}
+
+static void
+set_dirty(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
+ bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
+
+ assert(!(bo_fake->flags & BM_PINNED));
+
+ DBG("set_dirty - buf %d\n", bo_fake->id);
+ bo_fake->dirty = 1;
+}
+
+static int
+evict_lru(dri_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
+{
+ struct block *block, *tmp;
+
+ DBG("%s\n", __FUNCTION__);
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+
+ if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence))
+ return 0;
+
+ set_dirty(&bo_fake->bo);
+ bo_fake->block = NULL;
+
+ free_block(bufmgr_fake, block);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+evict_mru(dri_bufmgr_fake *bufmgr_fake)
+{
+ struct block *block, *tmp;
+
+ DBG("%s\n", __FUNCTION__);
+
+ DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
+ dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+
+ if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+ continue;
+
+ set_dirty(&bo_fake->bo);
+ bo_fake->block = NULL;
+
+ free_block(bufmgr_fake, block);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Removes all objects from the fenced list older than the given fence.
+ */
+static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
+ unsigned int fence_cookie)
+{
+ struct block *block, *tmp;
+ int ret = 0;
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
+ assert(block->fenced);
+
+ if (_fence_test(bufmgr_fake, block->fence)) {
+
+ block->fenced = 0;
+
+ if (!block->bo) {
+ DBG("delayed free: offset %x sz %x\n",
+ block->mem->ofs, block->mem->size);
+ DRMLISTDEL(block);
+ mmFreeMem(block->mem);
+ free(block);
+ }
+ else {
+ DBG("return to lru: offset %x sz %x\n",
+ block->mem->ofs, block->mem->size);
+ DRMLISTDEL(block);
+ DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+ }
+
+ ret = 1;
+ }
+ else {
+ /* Blocks are ordered by fence, so if one fails, all from
+ * here will fail also:
+ */
+ DBG("fence not passed: offset %x sz %x %d %d \n",
+ block->mem->ofs, block->mem->size, block->fence, bufmgr_fake->last_fence);
+ break;
+ }
+ }
+
+ DBG("%s: %d\n", __FUNCTION__, ret);
+ return ret;
+}
+
+static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
+{
+ struct block *block, *tmp;
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+ DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block,
+ block->mem->size, block->mem->ofs, block->bo, fence);
+ block->fence = fence;
+
+ block->on_hardware = 0;
+ block->fenced = 1;
+
+ /* Move to tail of pending list here
+ */
+ DRMLISTDEL(block);
+ DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
+ }
+
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+}
+
+static int evict_and_alloc_block(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ assert(bo_fake->block == NULL);
+
+ /* Search for already free memory:
+ */
+ if (alloc_block(bo))
+ return 1;
+
+ /* If we're not thrashing, allow lru eviction to dig deeper into
+ * recently used textures. We'll probably be thrashing soon:
+ */
+ if (!bufmgr_fake->thrashing) {
+ while (evict_lru(bufmgr_fake, 0))
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ /* Keep thrashing counter alive?
+ */
+ if (bufmgr_fake->thrashing)
+ bufmgr_fake->thrashing = 20;
+
+ /* Wait on any already pending fences - here we are waiting for any
+ * freed memory that has been submitted to hardware and fenced to
+ * become available:
+ */
+ while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+ uint32_t fence = bufmgr_fake->fenced.next->fence;
+ _fence_wait_internal(bufmgr_fake, fence);
+
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
+ while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+ uint32_t fence = bufmgr_fake->fenced.next->fence;
+ _fence_wait_internal(bufmgr_fake, fence);
+ }
+
+ if (!bufmgr_fake->thrashing) {
+ DBG("thrashing\n");
+ }
+ bufmgr_fake->thrashing = 20;
+
+ if (alloc_block(bo))
+ return 1;
+ }
+
+ while (evict_mru(bufmgr_fake))
+ if (alloc_block(bo))
+ return 1;
+
+ DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);
+
+ return 0;
+}
+
+/***********************************************************************
+ * Public functions
+ */
+
+/**
+ * Wait for hardware idle by emitting a fence and waiting for it.
+ */
+static void
+dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
+{
+ unsigned int cookie;
+
+ cookie = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv);
+ _fence_wait_internal(bufmgr_fake, cookie);
+}
+
+/**
+ * Wait for rendering to a buffer to complete.
+ *
+ * It is assumed that the batchbuffer which performed the rendering included
+ * the necessary flushing.
+ */
+static void
+dri_fake_bo_wait_rendering(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ if (bo_fake->block == NULL || !bo_fake->block->fenced)
+ return;
+
+ _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+}
+
+/* Specifically ignore texture memory sharing.
+ * -- just evict everything
+ * -- and wait for idle
+ */
+void
+intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ struct block *block, *tmp;
+
+ bufmgr_fake->need_fence = 1;
+ bufmgr_fake->fail = 0;
+
+ /* Wait for hardware idle. We don't know where acceleration has been
+ * happening, so we'll need to wait anyway before letting anything get
+ * put on the card again.
+ */
+ dri_bufmgr_fake_wait_idle(bufmgr_fake);
+
+ /* Check that we hadn't released the lock without having fenced the last
+ * set of buffers.
+ */
+ assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ assert(_fence_test(bufmgr_fake, block->fence));
+ set_dirty(block->bo);
+ }
+}
+
+static dri_bo *
+dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
+{
+ dri_bufmgr_fake *bufmgr_fake;
+ dri_bo_fake *bo_fake;
+
+ bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+
+ assert(size != 0);
+
+ bo_fake = calloc(1, sizeof(*bo_fake));
+ if (!bo_fake)
+ return NULL;
+
+ bo_fake->bo.size = size;
+ bo_fake->bo.offset = -1;
+ bo_fake->bo.virtual = NULL;
+ bo_fake->bo.bufmgr = bufmgr;
+ bo_fake->refcount = 1;
+
+ /* Alignment must be a power of two */
+ assert((alignment & (alignment - 1)) == 0);
+ if (alignment == 0)
+ alignment = 1;
+ bo_fake->alignment = alignment;
+ bo_fake->id = ++bufmgr_fake->buf_nr;
+ bo_fake->name = name;
+ bo_fake->flags = 0;
+ bo_fake->is_static = 0;
+
+ DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ return &bo_fake->bo;
+}
+
+dri_bo *
+intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
+ unsigned long offset, unsigned long size,
+ void *virtual)
+{
+ dri_bufmgr_fake *bufmgr_fake;
+ dri_bo_fake *bo_fake;
+
+ bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+
+ assert(size != 0);
+
+ bo_fake = calloc(1, sizeof(*bo_fake));
+ if (!bo_fake)
+ return NULL;
+
+ bo_fake->bo.size = size;
+ bo_fake->bo.offset = offset;
+ bo_fake->bo.virtual = virtual;
+ bo_fake->bo.bufmgr = bufmgr;
+ bo_fake->refcount = 1;
+ bo_fake->id = ++bufmgr_fake->buf_nr;
+ bo_fake->name = name;
+ bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE;
+ bo_fake->is_static = 1;
+
+ DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ return &bo_fake->bo;
+}
+
+static void
+dri_fake_bo_reference(dri_bo *bo)
+{
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ bo_fake->refcount++;
+}
+
+static void
+dri_fake_bo_unreference(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ int i;
+
+ if (--bo_fake->refcount == 0) {
+ assert(bo_fake->map_count == 0);
+ /* No remaining references, so free it */
+ if (bo_fake->block)
+ free_block(bufmgr_fake, bo_fake->block);
+ free_backing_store(bo);
+
+ for (i = 0; i < bo_fake->nr_relocs; i++)
+ dri_bo_unreference(bo_fake->relocs[i].target_buf);
+
+ DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
+
+ free(bo_fake->relocs);
+ free(bo);
+
+ return;
+ }
+}
+
+/**
+ * Set the buffer as not requiring backing store, and instead get the callback
+ * invoked whenever it would be set dirty.
+ */
+void intel_bo_fake_disable_backing_store(dri_bo *bo,
+ void (*invalidate_cb)(dri_bo *bo,
+ void *ptr),
+ void *ptr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ if (bo_fake->backing_store)
+ free_backing_store(bo);
+
+ bo_fake->flags |= BM_NO_BACKING_STORE;
+
+ DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
+ bo_fake->dirty = 1;
+ bo_fake->invalidate_cb = invalidate_cb;
+ bo_fake->invalidate_ptr = ptr;
+
+ /* Note that it is invalid right from the start. Also note
+ * invalidate_cb is called with the bufmgr locked, so cannot
+ * itself make bufmgr calls.
+ */
+ if (invalidate_cb != NULL)
+ invalidate_cb(bo, ptr);
+}
+
+/**
+ * Map a buffer into bo->virtual, allocating either card memory space (if
+ * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
+ */
+static int
+dri_fake_bo_map(dri_bo *bo, int write_enable)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ /* Static buffers are always mapped. */
+ if (bo_fake->is_static)
+ return 0;
+
+ /* Allow recursive mapping. Mesa may recursively map buffers with
+ * nested display loops, and it is used internally in bufmgr_fake
+ * for relocation.
+ */
+ if (bo_fake->map_count++ != 0)
+ return 0;
+
+ {
+ DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ if (bo->virtual != NULL) {
+ drmMsg("%s: already mapped\n", __FUNCTION__);
+ abort();
+ }
+ else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) {
+
+ if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+ DBG("%s: alloc failed\n", __FUNCTION__);
+ bufmgr_fake->fail = 1;
+ return 1;
+ }
+ else {
+ assert(bo_fake->block);
+ bo_fake->dirty = 0;
+
+ if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
+ bo_fake->block->fenced) {
+ dri_fake_bo_wait_rendering(bo);
+ }
+
+ bo->virtual = bo_fake->block->virtual;
+ }
+ }
+ else {
+ if (write_enable)
+ set_dirty(bo);
+
+ if (bo_fake->backing_store == 0)
+ alloc_backing_store(bo);
+
+ bo->virtual = bo_fake->backing_store;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dri_fake_bo_unmap(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ /* Static buffers are always mapped. */
+ if (bo_fake->is_static)
+ return 0;
+
+ assert(bo_fake->map_count != 0);
+ if (--bo_fake->map_count != 0)
+ return 0;
+
+ DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ bo->virtual = NULL;
+
+ return 0;
+}
+
+static void
+dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
+{
+ struct block *block, *tmp;
+
+ bufmgr_fake->performed_rendering = 0;
+   /* Okay: for every BO that is on the HW, kick it off.
+    * Seriously, not afraid of the POLICE right now. */
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+ dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+
+ block->on_hardware = 0;
+ free_block(bufmgr_fake, block);
+ bo_fake->block = NULL;
+ bo_fake->validated = 0;
+ if (!(bo_fake->flags & BM_NO_BACKING_STORE))
+ bo_fake->dirty = 1;
+ }
+}
+
+static int
+dri_fake_bo_validate(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ /* XXX: Sanity-check whether we've already validated this one under
+ * different flags. See drmAddValidateItem().
+ */
+ bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+
+ DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+ bo_fake->bo.size / 1024);
+
+ /* Sanity check: Buffers should be unmapped before being validated.
+ * This is not so much of a problem for bufmgr_fake, but TTM refuses,
+ * and the problem is harder to debug there.
+ */
+ assert(bo_fake->map_count == 0);
+
+ if (bo_fake->is_static) {
+ /* Add it to the needs-fence list */
+ bufmgr_fake->need_fence = 1;
+ return 0;
+ }
+
+ /* reset size accounted */
+ bo_fake->size_accounted = 0;
+
+ /* Allocate the card memory */
+ if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+ bufmgr_fake->fail = 1;
+ DBG("Failed to validate buf %d:%s\n", bo_fake->id, bo_fake->name);
+ return -1;
+ }
+
+ assert(bo_fake->block);
+ assert(bo_fake->block->bo == &bo_fake->bo);
+
+ bo->offset = bo_fake->block->mem->ofs;
+
+ /* Upload the buffer contents if necessary */
+ if (bo_fake->dirty) {
+ DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
+ bo_fake->name, bo->size, bo_fake->block->mem->ofs);
+
+ assert(!(bo_fake->flags &
+ (BM_NO_BACKING_STORE|BM_PINNED)));
+
+ /* Actually, should be able to just wait for a fence on the memory,
+ * which we would be tracking when we free it. Waiting for idle is
+ * a sufficiently large hammer for now.
+ */
+ dri_bufmgr_fake_wait_idle(bufmgr_fake);
+
+      /* We may never have mapped this BO, so it might not have any backing
+       * store; if this happens (it should be rare), zero the card memory
+       * in any case. */
+ if (bo_fake->backing_store)
+ memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size);
+ else
+ memset(bo_fake->block->virtual, 0, bo->size);
+
+ bo_fake->dirty = 0;
+ }
+
+ bo_fake->block->fenced = 0;
+ bo_fake->block->on_hardware = 1;
+ DRMLISTDEL(bo_fake->block);
+ DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
+
+ bo_fake->validated = 1;
+ bufmgr_fake->need_fence = 1;
+
+ return 0;
+}
+
+static void
+dri_fake_fence_validated(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ unsigned int cookie;
+
+ cookie = _fence_emit_internal(bufmgr_fake);
+ fence_blocks(bufmgr_fake, cookie);
+
+ DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
+}
+
+static void
+dri_fake_destroy(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+
+ mmDestroy(bufmgr_fake->heap);
+ free(bufmgr);
+}
+
+static int
+dri_fake_emit_reloc(dri_bo *reloc_buf,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta, uint32_t offset, dri_bo *target_buf)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)reloc_buf->bufmgr;
+ struct fake_buffer_reloc *r;
+ dri_bo_fake *reloc_fake = (dri_bo_fake *)reloc_buf;
+ dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
+ int i;
+
+ assert(reloc_buf);
+ assert(target_buf);
+
+ assert(target_fake->is_static || target_fake->size_accounted);
+
+ if (reloc_fake->relocs == NULL) {
+ reloc_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) *
+ MAX_RELOCS);
+ }
+
+ r = &reloc_fake->relocs[reloc_fake->nr_relocs++];
+
+ assert(reloc_fake->nr_relocs <= MAX_RELOCS);
+
+ dri_bo_reference(target_buf);
+
+ r->target_buf = target_buf;
+ r->offset = offset;
+ r->last_target_offset = target_buf->offset;
+ r->delta = delta;
+ r->read_domains = read_domains;
+ r->write_domain = write_domain;
+
+ if (bufmgr_fake->debug) {
+ /* Check that a conflicting relocation hasn't already been emitted. */
+ for (i = 0; i < reloc_fake->nr_relocs - 1; i++) {
+ struct fake_buffer_reloc *r2 = &reloc_fake->relocs[i];
+
+ assert(r->offset != r2->offset);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Incorporates the validation flags associated with each relocation into
+ * the combined validation flags for the buffer on this batchbuffer submission.
+ */
+static void
+dri_fake_calculate_domains(dri_bo *bo)
+{
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ int i;
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+
+ /* Do the same for the tree of buffers we depend on */
+ dri_fake_calculate_domains(r->target_buf);
+
+ target_fake->read_domains |= r->read_domains;
+      if (r->write_domain != 0)
+         target_fake->write_domain = r->write_domain;
+ }
+}
+
+
+static int
+dri_fake_reloc_and_validate_buffer(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ int i, ret;
+
+ assert(bo_fake->map_count == 0);
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+ uint32_t reloc_data;
+
+ /* Validate the target buffer if that hasn't been done. */
+ if (!target_fake->validated) {
+ ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
+ if (ret != 0) {
+ if (bo->virtual != NULL)
+ dri_bo_unmap(bo);
+ return ret;
+ }
+ }
+
+ /* Calculate the value of the relocation entry. */
+ if (r->target_buf->offset != r->last_target_offset) {
+ reloc_data = r->target_buf->offset + r->delta;
+
+ if (bo->virtual == NULL)
+ dri_bo_map(bo, 1);
+
+ *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
+
+ r->last_target_offset = r->target_buf->offset;
+ }
+ }
+
+ if (bo->virtual != NULL)
+ dri_bo_unmap(bo);
+
+ if (bo_fake->write_domain != 0) {
+ if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
+ if (bo_fake->backing_store == 0)
+ alloc_backing_store(bo);
+
+ bo_fake->card_dirty = 1;
+ }
+ bufmgr_fake->performed_rendering = 1;
+ }
+
+ return dri_fake_bo_validate(bo);
+}
+
+static void *
+dri_fake_process_relocs(dri_bo *batch_buf)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr;
+ dri_bo_fake *batch_fake = (dri_bo_fake *)batch_buf;
+ int ret;
+ int retry_count = 0;
+
+ bufmgr_fake->performed_rendering = 0;
+
+ dri_fake_calculate_domains(batch_buf);
+
+ batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
+
+   /* If we've run out of RAM, blow the whole lot away and retry. */
+ restart:
+ ret = dri_fake_reloc_and_validate_buffer(batch_buf);
+ if (bufmgr_fake->fail == 1) {
+ if (retry_count == 0) {
+ retry_count++;
+ dri_fake_kick_all(bufmgr_fake);
+ bufmgr_fake->fail = 0;
+ goto restart;
+ } else /* dump out the memory here */
+ mmDumpMemInfo(bufmgr_fake->heap);
+ }
+
+ assert(ret == 0);
+
+ bufmgr_fake->current_total_size = 0;
+ return NULL;
+}
+
+static void
+dri_bo_fake_post_submit(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ int i;
+
+ for (i = 0; i < bo_fake->nr_relocs; i++) {
+ struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+ dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+
+ if (target_fake->validated)
+ dri_bo_fake_post_submit(r->target_buf);
+
+ DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
+ bo_fake->name, (uint32_t)bo->offset, r->offset,
+ target_fake->name, (uint32_t)r->target_buf->offset, r->delta);
+ }
+
+ assert(bo_fake->map_count == 0);
+ bo_fake->validated = 0;
+ bo_fake->read_domains = 0;
+ bo_fake->write_domain = 0;
+}
+
+
+static void
+dri_fake_post_submit(dri_bo *batch_buf)
+{
+ dri_fake_fence_validated(batch_buf->bufmgr);
+
+ dri_bo_fake_post_submit(batch_buf);
+}
+
+static int
+dri_fake_check_aperture_space(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ unsigned int sz;
+
+ sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
+
+ if (bo_fake->size_accounted || bo_fake->is_static)
+ return 0;
+
+ if (bufmgr_fake->current_total_size + sz > bufmgr_fake->size) {
+ DBG("check_space: %s bo %d %d overflowed bufmgr size %d\n", bo_fake->name, bo_fake->id, sz, bufmgr_fake->size);
+ return -1;
+ }
+
+ bufmgr_fake->current_total_size += sz;
+ bo_fake->size_accounted = 1;
+ DBG("drm_check_space: buf %d, %s %d %d\n", bo_fake->id, bo_fake->name, bo->size, bufmgr_fake->current_total_size);
+ return 0;
+}
+
+/**
+ * Evicts all buffers, waiting for fences to pass and copying contents out
+ * as necessary.
+ *
+ * Used by the X Server on LeaveVT, when the card memory is no longer our
+ * own.
+ */
+void
+intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ struct block *block, *tmp;
+
+ bufmgr_fake->need_fence = 1;
+ bufmgr_fake->fail = 0;
+
+ /* Wait for hardware idle. We don't know where acceleration has been
+ * happening, so we'll need to wait anyway before letting anything get
+ * put on the card again.
+ */
+ dri_bufmgr_fake_wait_idle(bufmgr_fake);
+
+ /* Check that we hadn't released the lock without having fenced the last
+ * set of buffers.
+ */
+ assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+ assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+ DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+ /* Releases the memory, and memcpys dirty contents out if necessary. */
+ free_block(bufmgr_fake, block);
+ }
+}
+
+dri_bufmgr *
+intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
+ unsigned long size,
+ unsigned int (*fence_emit)(void *private),
+ int (*fence_wait)(void *private, unsigned int cookie),
+ void *driver_priv)
+{
+ dri_bufmgr_fake *bufmgr_fake;
+
+ bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
+
+ /* Initialize allocator */
+ DRMINITLISTHEAD(&bufmgr_fake->fenced);
+ DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
+ DRMINITLISTHEAD(&bufmgr_fake->lru);
+
+ bufmgr_fake->low_offset = low_offset;
+ bufmgr_fake->virtual = low_virtual;
+ bufmgr_fake->size = size;
+ bufmgr_fake->heap = mmInit(low_offset, size);
+
+ /* Hook in methods */
+ bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc;
+ bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference;
+ bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
+ bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
+ bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
+ bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering;
+ bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
+ bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs;
+ bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit;
+ bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space;
+ bufmgr_fake->bufmgr.debug = 0;
+ bufmgr_fake->intel_bufmgr.emit_reloc = dri_fake_emit_reloc;
+
+ bufmgr_fake->fence_emit = fence_emit;
+ bufmgr_fake->fence_wait = fence_wait;
+ bufmgr_fake->driver_priv = driver_priv;
+
+ return &bufmgr_fake->bufmgr;
+}
+
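A rough sketch of how a driver would instantiate this fake backend; the aperture parameters and the two fence callbacks are hypothetical stand-ins for the driver's own fence machinery (note the requirement above that fence_emit also flush writes):

    /* Hypothetical driver glue for intel_bufmgr_fake_init(). */
    static unsigned int my_fence_emit(void *priv)
    {
        return my_hw_flush_and_emit_fence(priv);   /* returns a cookie */
    }

    static int my_fence_wait(void *priv, unsigned int cookie)
    {
        return my_hw_wait_fence(priv, cookie);     /* 0 on success */
    }

    /* e.g. during screen initialization: */
    dri_bufmgr *bufmgr =
        intel_bufmgr_fake_init(aperture_offset, aperture_virtual,
                               aperture_size,
                               my_fence_emit, my_fence_wait, driver_priv);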
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
new file mode 100644
index 00000000..cdc2a7ac
--- /dev/null
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -0,0 +1,853 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Red Hat Inc.
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ */
+
+#include <xf86drm.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include <errno.h>
+#include "dri_bufmgr.h"
+#include "intel_bufmgr.h"
+
+#include "i915_drm.h"
+
+#define DBG(...) do { \
+ if (bufmgr_gem->bufmgr.debug) \
+ fprintf(stderr, __VA_ARGS__); \
+} while (0)
+
+typedef struct _dri_bo_gem dri_bo_gem;
+
+struct dri_gem_bo_bucket {
+ dri_bo_gem *head, **tail;
+ /**
+ * Limit on the number of entries in this bucket.
+ *
+     * 0 means that caching at this bucket size is disabled.
+ * -1 means that there is no limit to caching at this size.
+ */
+ int max_entries;
+ int num_entries;
+};
+
+/* Arbitrarily chosen: 16 buckets means that the maximum size we'll cache
+ * for reuse is 1 << 15 pages, or 128MB (bucket i holds blocks of 1 << i
+ * pages).
+ */
+#define INTEL_GEM_BO_BUCKETS 16
+typedef struct _dri_bufmgr_gem {
+ dri_bufmgr bufmgr;
+
+ struct intel_bufmgr intel_bufmgr;
+
+ int fd;
+
+ int max_relocs;
+
+ struct drm_i915_gem_exec_object *exec_objects;
+ dri_bo **exec_bos;
+ int exec_size;
+ int exec_count;
+
+ /** Array of lists of cached gem objects of power-of-two sizes */
+ struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];
+
+ struct drm_i915_gem_execbuffer exec_arg;
+} dri_bufmgr_gem;
+
+struct _dri_bo_gem {
+ dri_bo bo;
+
+ int refcount;
+ /** Boolean whether the mmap ioctl has been called for this buffer yet. */
+ int mapped;
+ uint32_t gem_handle;
+ const char *name;
+
+ /**
+ * Index of the buffer within the validation list while preparing a
+ * batchbuffer execution.
+ */
+ int validate_index;
+
+ /**
+ * Boolean whether we've started swrast
+ * Set when the buffer has been mapped
+ * Cleared when the buffer is unmapped
+ */
+ int swrast;
+
+ /** Array passed to the DRM containing relocation information. */
+ struct drm_i915_gem_relocation_entry *relocs;
+ /** Array of bos corresponding to relocs[i].target_handle */
+ dri_bo **reloc_target_bo;
+ /** Number of entries in relocs */
+ int reloc_count;
+ /** Mapped address for the buffer */
+ void *virtual;
+
+ /** free list */
+ dri_bo_gem *next;
+};
+
+static int
+logbase2(int n)
+{
+ int i = 1;
+ int log2 = 0;
+
+ while (n > i) {
+ i *= 2;
+ log2++;
+ }
+
+ return log2;
+}
+
+static struct dri_gem_bo_bucket *
+dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
+{
+ int i;
+
+ /* We only do buckets in power of two increments */
+ if ((size & (size - 1)) != 0)
+ return NULL;
+
+ /* We should only see sizes rounded to pages. */
+ assert((size % 4096) == 0);
+
+ /* We always allocate in units of pages */
+ i = ffs(size / 4096) - 1;
+ if (i >= INTEL_GEM_BO_BUCKETS)
+ return NULL;
+
+ return &bufmgr_gem->cache_bucket[i];
+}
+
+
+static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
+{
+ int i, j;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ dri_bo *bo = bufmgr_gem->exec_bos[i];
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ if (bo_gem->relocs == NULL) {
+ DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
+ continue;
+ }
+
+ for (j = 0; j < bo_gem->reloc_count; j++) {
+ dri_bo *target_bo = bo_gem->reloc_target_bo[j];
+ dri_bo_gem *target_gem = (dri_bo_gem *)target_bo;
+
+ DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
+ i,
+ bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset,
+ target_gem->gem_handle, target_gem->name, target_bo->offset,
+ bo_gem->relocs[j].delta);
+ }
+ }
+}
+
+/**
+ * Adds the given buffer to the list of buffers to be validated (moved into the
+ * appropriate memory type) with the next batch submission.
+ *
+ * A buffer that is validated multiple times in a batch submission is only
+ * placed on the validation list once; repeat additions are ignored.
+ */
+static void
+intel_add_validate_buffer(dri_bo *bo)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ int index;
+
+ if (bo_gem->validate_index != -1)
+ return;
+
+ /* Extend the array of validation entries as necessary. */
+ if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+ int new_size = bufmgr_gem->exec_size * 2;
+
+ if (new_size == 0)
+ new_size = 5;
+
+ bufmgr_gem->exec_objects =
+ realloc(bufmgr_gem->exec_objects,
+ sizeof(*bufmgr_gem->exec_objects) * new_size);
+ bufmgr_gem->exec_bos =
+ realloc(bufmgr_gem->exec_bos,
+ sizeof(*bufmgr_gem->exec_bos) * new_size);
+ bufmgr_gem->exec_size = new_size;
+ }
+
+ index = bufmgr_gem->exec_count;
+ bo_gem->validate_index = index;
+ /* Fill in array entry */
+ bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
+ bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
+ bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
+ bufmgr_gem->exec_objects[index].alignment = 0;
+ bufmgr_gem->exec_objects[index].offset = 0;
+ bufmgr_gem->exec_bos[index] = bo;
+ dri_bo_reference(bo);
+ bufmgr_gem->exec_count++;
+}
+
+
+#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
+ sizeof(uint32_t))
+
+static int
+intel_setup_reloc_list(dri_bo *bo)
+{
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+    bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
+			    sizeof(struct drm_i915_gem_relocation_entry));
+    bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *));
+
+    /* Fail cleanly rather than returning success with NULL reloc arrays. */
+    if (bo_gem->relocs == NULL || bo_gem->reloc_target_bo == NULL) {
+	free(bo_gem->relocs);
+	free(bo_gem->reloc_target_bo);
+	bo_gem->relocs = NULL;
+	bo_gem->reloc_target_bo = NULL;
+	return -ENOMEM;
+    }
+
+    return 0;
+}
+
+static dri_bo *
+dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ dri_bo_gem *bo_gem;
+ unsigned int page_size = getpagesize();
+ int ret;
+ struct dri_gem_bo_bucket *bucket;
+ int alloc_from_cache = 0;
+ unsigned long bo_size;
+
+ /* Round the allocated size up to a power of two number of pages. */
+ bo_size = 1 << logbase2(size);
+ if (bo_size < page_size)
+ bo_size = page_size;
+ bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
+
+ /* If we don't have caching at this size, don't actually round the
+ * allocation up.
+ */
+ if (bucket == NULL || bucket->max_entries == 0) {
+ bo_size = size;
+ if (bo_size < page_size)
+ bo_size = page_size;
+ }
+
+ /* Get a buffer out of the cache if available */
+ if (bucket != NULL && bucket->num_entries > 0) {
+ struct drm_i915_gem_busy busy;
+
+ bo_gem = bucket->head;
+ busy.handle = bo_gem->gem_handle;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ alloc_from_cache = (ret == 0 && busy.busy == 0);
+
+ if (alloc_from_cache) {
+ bucket->head = bo_gem->next;
+ if (bo_gem->next == NULL)
+ bucket->tail = &bucket->head;
+ bucket->num_entries--;
+ }
+ }
+
+ if (!alloc_from_cache) {
+ struct drm_i915_gem_create create;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ bo_gem->bo.size = bo_size;
+ memset(&create, 0, sizeof(create));
+ create.size = bo_size;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ bo_gem->gem_handle = create.handle;
+ if (ret != 0) {
+ free(bo_gem);
+ return NULL;
+ }
+ bo_gem->bo.bufmgr = bufmgr;
+ }
+
+ bo_gem->name = name;
+ bo_gem->refcount = 1;
+ bo_gem->validate_index = -1;
+
+ DBG("bo_create: buf %d (%s) %ldb\n",
+ bo_gem->gem_handle, bo_gem->name, size);
+
+ return &bo_gem->bo;
+}
+
+/**
+ * Returns a dri_bo wrapping the given buffer object handle.
+ *
+ * This can be used when one application needs to pass a buffer object
+ * to another.
+ */
+dri_bo *
+intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
+ unsigned int handle)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ dri_bo_gem *bo_gem;
+ int ret;
+ struct drm_gem_open open_arg;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ memset(&open_arg, 0, sizeof(open_arg));
+ open_arg.name = handle;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ if (ret != 0) {
+ fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
+		name, handle, strerror(errno));
+ free(bo_gem);
+ return NULL;
+ }
+ bo_gem->bo.size = open_arg.size;
+ bo_gem->bo.offset = 0;
+ bo_gem->bo.virtual = NULL;
+ bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->name = name;
+ bo_gem->refcount = 1;
+ bo_gem->validate_index = -1;
+ bo_gem->gem_handle = open_arg.handle;
+
+ DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
+
+ return &bo_gem->bo;
+}
+
+static void
+dri_gem_bo_reference(dri_bo *bo)
+{
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ bo_gem->refcount++;
+}
+
+static void
+dri_gem_bo_free(dri_bo *bo)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_gem_close close;
+ int ret;
+
+ if (bo_gem->mapped)
+ munmap (bo_gem->virtual, bo_gem->bo.size);
+
+ /* Close this object */
+ close.handle = bo_gem->gem_handle;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
+ if (ret != 0) {
+ fprintf(stderr,
+ "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+	    bo_gem->gem_handle, bo_gem->name, strerror(errno));
+ }
+ free(bo);
+}
+
+static void
+dri_gem_bo_unreference(dri_bo *bo)
+{
+    dri_bufmgr_gem *bufmgr_gem;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+    if (bo == NULL)
+	return;
+
+    /* Don't dereference bo->bufmgr until bo is known to be non-NULL. */
+    bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+ if (--bo_gem->refcount == 0) {
+ struct dri_gem_bo_bucket *bucket;
+
+ if (bo_gem->relocs != NULL) {
+ int i;
+
+ /* Unreference all the target buffers */
+ for (i = 0; i < bo_gem->reloc_count; i++)
+ dri_bo_unreference(bo_gem->reloc_target_bo[i]);
+ free(bo_gem->reloc_target_bo);
+ free(bo_gem->relocs);
+ }
+
+ DBG("bo_unreference final: %d (%s)\n",
+ bo_gem->gem_handle, bo_gem->name);
+
+ bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+ /* Put the buffer into our internal cache for reuse if we can. */
+ if (bucket != NULL &&
+ (bucket->max_entries == -1 ||
+ (bucket->max_entries > 0 &&
+ bucket->num_entries < bucket->max_entries)))
+ {
+ bo_gem->name = 0;
+ bo_gem->validate_index = -1;
+ bo_gem->relocs = NULL;
+ bo_gem->reloc_target_bo = NULL;
+ bo_gem->reloc_count = 0;
+
+ bo_gem->next = NULL;
+ *bucket->tail = bo_gem;
+ bucket->tail = &bo_gem->next;
+ bucket->num_entries++;
+ } else {
+ dri_gem_bo_free(bo);
+ }
+
+ return;
+ }
+}
+
+static int
+dri_gem_bo_map(dri_bo *bo, int write_enable)
+{
+ dri_bufmgr_gem *bufmgr_gem;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+ /* Allow recursive mapping. Mesa may recursively map buffers with
+ * nested display loops.
+ */
+ if (!bo_gem->mapped) {
+
+ assert(bo->virtual == NULL);
+
+ DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+
+ if (bo_gem->virtual == NULL) {
+ struct drm_i915_gem_mmap mmap_arg;
+
+ memset(&mmap_arg, 0, sizeof(mmap_arg));
+ mmap_arg.handle = bo_gem->gem_handle;
+ mmap_arg.offset = 0;
+ mmap_arg.size = bo->size;
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ if (ret != 0) {
+ fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name, strerror(errno));
+ }
+ bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
+ }
+ bo->virtual = bo_gem->virtual;
+ bo_gem->swrast = 0;
+ bo_gem->mapped = 1;
+ DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual);
+ }
+
+ if (!bo_gem->swrast) {
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+ if (write_enable)
+ set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+ else
+ set_domain.write_domain = 0;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ } while (ret == -1 && errno == EINTR);
+ if (ret != 0) {
+ fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
+ __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
+ }
+ bo_gem->swrast = 1;
+ }
+
+ return 0;
+}
+
+static int
+dri_gem_bo_unmap(dri_bo *bo)
+{
+    dri_bufmgr_gem *bufmgr_gem;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_sw_finish sw_finish;
+    int ret;
+
+    if (bo == NULL)
+	return 0;
+
+    /* Don't dereference bo->bufmgr until bo is known to be non-NULL. */
+    bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+ assert(bo_gem->mapped);
+
+ if (bo_gem->swrast) {
+ sw_finish.handle = bo_gem->gem_handle;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
+ &sw_finish);
+ } while (ret == -1 && errno == EINTR);
+ bo_gem->swrast = 0;
+ }
+ return 0;
+}
+
+static int
+dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_i915_gem_pwrite pwrite;
+ int ret;
+
+ memset (&pwrite, 0, sizeof (pwrite));
+ pwrite.handle = bo_gem->gem_handle;
+ pwrite.offset = offset;
+ pwrite.size = size;
+ pwrite.data_ptr = (uint64_t) (uintptr_t) data;
+ do {
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+ } while (ret == -1 && errno == EINTR);
+ if (ret != 0) {
+ fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, (int) offset, (int) size,
+ strerror (errno));
+ }
+ return 0;
+}
+
+static int
+dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_i915_gem_pread pread;
+ int ret;
+
+ memset (&pread, 0, sizeof (pread));
+ pread.handle = bo_gem->gem_handle;
+ pread.offset = offset;
+ pread.size = size;
+ pread.data_ptr = (uint64_t) (uintptr_t) data;
+ do {
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+ } while (ret == -1 && errno == EINTR);
+ if (ret != 0) {
+ fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, (int) offset, (int) size,
+ strerror (errno));
+ }
+ return 0;
+}
+
+static void
+dri_gem_bo_wait_rendering(dri_bo *bo)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = 0;
+ ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+ if (ret != 0) {
+ fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
+ strerror (errno));
+ }
+}
+
+static void
+dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ int i;
+
+ free(bufmgr_gem->exec_objects);
+ free(bufmgr_gem->exec_bos);
+
+ /* Free any cached buffer objects we were going to reuse */
+ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
+ struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
+ dri_bo_gem *bo_gem;
+
+ while ((bo_gem = bucket->head) != NULL) {
+ bucket->head = bo_gem->next;
+ if (bo_gem->next == NULL)
+ bucket->tail = &bucket->head;
+ bucket->num_entries--;
+
+ dri_gem_bo_free(&bo_gem->bo);
+ }
+ }
+
+ free(bufmgr);
+}
+
+/**
+ * Adds the target buffer to the validation list and adds the relocation
+ * to the reloc_buffer's relocation list.
+ *
+ * The relocation entry at the given offset must already contain the
+ * precomputed relocation value, because the kernel will optimize out
+ * the relocation entry write when the buffer hasn't moved from the
+ * last known offset in target_bo.
+ */
+static int
+dri_gem_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
+		   uint32_t delta, uint32_t offset, dri_bo *target_bo)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
+
+    /* Create a new relocation list if needed; bail out rather than
+     * dereference NULL reloc arrays if that allocation fails.
+     */
+    if (bo_gem->relocs == NULL && intel_setup_reloc_list(bo) != 0)
+	return -ENOMEM;
+
+ /* Check overflow */
+ assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
+
+ /* Check args */
+ assert (offset <= bo->size - 4);
+ assert ((write_domain & (write_domain-1)) == 0);
+
+ bo_gem->relocs[bo_gem->reloc_count].offset = offset;
+ bo_gem->relocs[bo_gem->reloc_count].delta = delta;
+ bo_gem->relocs[bo_gem->reloc_count].target_handle =
+ target_bo_gem->gem_handle;
+ bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
+ bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
+ bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
+
+ bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
+ dri_bo_reference(target_bo);
+
+ bo_gem->reloc_count++;
+ return 0;
+}
+
+/**
+ * Walk the tree of relocations rooted at BO and accumulate the list of
+ * validations to be performed and update the relocation buffers with
+ * index values into the validation list.
+ */
+static void
+dri_gem_bo_process_reloc(dri_bo *bo)
+{
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ int i;
+
+ if (bo_gem->relocs == NULL)
+ return;
+
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ dri_bo *target_bo = bo_gem->reloc_target_bo[i];
+
+ /* Continue walking the tree depth-first. */
+ dri_gem_bo_process_reloc(target_bo);
+
+ /* Add the target to the validate list */
+ intel_add_validate_buffer(target_bo);
+ }
+}
+
+static void *
+dri_gem_process_reloc(dri_bo *batch_buf)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr;
+
+ /* Update indices and set up the validate list. */
+ dri_gem_bo_process_reloc(batch_buf);
+
+ /* Add the batch buffer to the validation list. There are no relocations
+ * pointing to it.
+ */
+ intel_add_validate_buffer(batch_buf);
+
+ bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
+ bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count;
+ bufmgr_gem->exec_arg.batch_start_offset = 0;
+ bufmgr_gem->exec_arg.batch_len = 0; /* written in intel_exec_ioctl */
+
+ return &bufmgr_gem->exec_arg;
+}
+
+static void
+intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
+{
+ int i;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ dri_bo *bo = bufmgr_gem->exec_bos[i];
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ /* Update the buffer offset */
+ if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
+ DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
+ bo_gem->gem_handle, bo_gem->name, bo->offset,
+ bufmgr_gem->exec_objects[i].offset);
+ bo->offset = bufmgr_gem->exec_objects[i].offset;
+ }
+ }
+}
+
+static void
+dri_gem_post_submit(dri_bo *batch_buf)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
+ int i;
+
+ intel_update_buffer_offsets (bufmgr_gem);
+
+ if (bufmgr_gem->bufmgr.debug)
+ dri_gem_dump_validation_list(bufmgr_gem);
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ dri_bo *bo = bufmgr_gem->exec_bos[i];
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ /* Need to call swrast on next bo_map */
+ bo_gem->swrast = 0;
+
+ /* Disconnect the buffer from the validate list */
+ bo_gem->validate_index = -1;
+ dri_bo_unreference(bo);
+ bufmgr_gem->exec_bos[i] = NULL;
+ }
+ bufmgr_gem->exec_count = 0;
+}
+
+/**
+ * Enables unlimited caching of buffer objects for reuse.
+ *
+ * This is potentially very memory expensive, as the cache at each bucket
+ * size is only bounded by how many buffers of that size we've managed to have
+ * in flight at once.
+ */
+void
+intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ int i;
+
+ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
+ bufmgr_gem->cache_bucket[i].max_entries = -1;
+ }
+}
+
+/*
+ * GEM does no aperture bookkeeping in user space, so there is nothing to
+ * check here; report success unconditionally.
+ */
+static int
+dri_gem_check_aperture_space(dri_bo *bo)
+{
+ return 0;
+}
+
+/**
+ * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
+ * and manage buffer objects.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ * \param batch_size Size of the batch buffer, used to size the relocation
+ *	  lists.
+ */
+dri_bufmgr *
+intel_bufmgr_gem_init(int fd, int batch_size)
+{
+    dri_bufmgr_gem *bufmgr_gem;
+    int i;
+
+    bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+    if (bufmgr_gem == NULL)
+	return NULL;
+
+    bufmgr_gem->fd = fd;
+
+ /* Let's go with one relocation per every 2 dwords (but round down a bit
+ * since a power of two will mean an extra page allocation for the reloc
+ * buffer).
+ *
+ * Every 4 was too few for the blender benchmark.
+ */
+ bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
+
+ bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
+ bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
+ bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
+ bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
+ bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
+ bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
+ bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
+ bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
+ bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
+ bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
+ bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
+ bufmgr_gem->bufmgr.debug = 0;
+ bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
+ bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc;
+ /* Initialize the linked lists for BO reuse cache. */
+ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
+ bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
+
+ return &bufmgr_gem->bufmgr;
+}
+
+int
+intel_bo_emit_reloc(dri_bo *reloc_buf,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta, uint32_t offset, dri_bo *target_buf)
+{
+ struct intel_bufmgr *intel_bufmgr;
+
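+    /* struct intel_bufmgr is laid out directly after the generic dri_bufmgr
+     * in dri_bufmgr_gem, so stepping one dri_bufmgr past the base pointer
+     * lands on the Intel-specific function table.
+     */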
+ intel_bufmgr = (struct intel_bufmgr *)(reloc_buf->bufmgr + 1);
+
+ return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
+ delta, offset, target_buf);
+}
diff --git a/libdrm/intel/mm.c b/libdrm/intel/mm.c
new file mode 100644
index 00000000..98146405
--- /dev/null
+++ b/libdrm/intel/mm.c
@@ -0,0 +1,281 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "xf86drm.h"
+#include "mm.h"
+
+void
+mmDumpMemInfo(const struct mem_block *heap)
+{
+ drmMsg("Memory heap %p:\n", (void *)heap);
+ if (heap == 0) {
+ drmMsg(" heap == 0\n");
+ } else {
+ const struct mem_block *p;
+
+ for(p = heap->next; p != heap; p = p->next) {
+ drmMsg(" Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
+ p->free ? 'F':'.',
+ p->reserved ? 'R':'.');
+ }
+
+ drmMsg("\nFree list:\n");
+
+ for(p = heap->next_free; p != heap; p = p->next_free) {
+ drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
+ p->free ? 'F':'.',
+ p->reserved ? 'R':'.');
+ }
+
+ }
+ drmMsg("End of memory blocks\n");
+}
+
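+/* Initialize a heap as circular doubly-linked block and free lists: a
+ * sentinel node plus one free block covering [ofs, ofs + size).
+ */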
+struct mem_block *
+mmInit(int ofs, int size)
+{
+ struct mem_block *heap, *block;
+
+ if (size <= 0)
+ return NULL;
+
+ heap = (struct mem_block *) calloc(1, sizeof(struct mem_block));
+ if (!heap)
+ return NULL;
+
+ block = (struct mem_block *) calloc(1, sizeof(struct mem_block));
+ if (!block) {
+ free(heap);
+ return NULL;
+ }
+
+ heap->next = block;
+ heap->prev = block;
+ heap->next_free = block;
+ heap->prev_free = block;
+
+ block->heap = heap;
+ block->next = heap;
+ block->prev = heap;
+ block->next_free = heap;
+ block->prev_free = heap;
+
+ block->ofs = ofs;
+ block->size = size;
+ block->free = 1;
+
+ return heap;
+}
+
+
+static struct mem_block *
+SliceBlock(struct mem_block *p,
+ int startofs, int size,
+ int reserved, int alignment)
+{
+ struct mem_block *newblock;
+
+ /* break left [p, newblock, p->next], then p = newblock */
+ if (startofs > p->ofs) {
+ newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs;
+ newblock->size = p->size - (startofs - p->ofs);
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* break right, also [p, newblock, p->next] */
+ if (size < p->size) {
+ newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
+ if (!newblock)
+ return NULL;
+ newblock->ofs = startofs + size;
+ newblock->size = p->size - size;
+ newblock->free = 1;
+ newblock->heap = p->heap;
+
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+
+ newblock->next_free = p->next_free;
+ newblock->prev_free = p;
+ p->next_free->prev_free = newblock;
+ p->next_free = newblock;
+
+ p->size = size;
+ }
+
+ /* p = middle block */
+ p->free = 0;
+
+ /* Remove p from the free list:
+ */
+ p->next_free->prev_free = p->prev_free;
+ p->prev_free->next_free = p->next_free;
+
+ p->next_free = 0;
+ p->prev_free = 0;
+
+ p->reserved = reserved;
+ return p;
+}
+
+
+struct mem_block *
+mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
+{
+ struct mem_block *p;
+ const int mask = (1 << align2)-1;
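+   /* e.g. align2 == 12 requests 4096-byte alignment; 'mask' covers the low
+    * bits that must be clear in an aligned offset.
+    */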
+ int startofs = 0;
+ int endofs;
+
+ if (!heap || align2 < 0 || size <= 0)
+ return NULL;
+
+ for (p = heap->next_free; p != heap; p = p->next_free) {
+ assert(p->free);
+
+ startofs = (p->ofs + mask) & ~mask;
+ if ( startofs < startSearch ) {
+ startofs = startSearch;
+ }
+ endofs = startofs+size;
+ if (endofs <= (p->ofs+p->size))
+ break;
+ }
+
+ if (p == heap)
+ return NULL;
+
+ assert(p->free);
+ p = SliceBlock(p,startofs,size,0,mask+1);
+
+ return p;
+}
+
+
+struct mem_block *
+mmFindBlock(struct mem_block *heap, int start)
+{
+ struct mem_block *p;
+
+ for (p = heap->next; p != heap; p = p->next) {
+ if (p->ofs == start)
+ return p;
+ }
+
+ return NULL;
+}
+
+
+static int
+Join2Blocks(struct mem_block *p)
+{
+ /* XXX there should be some assertions here */
+
+ /* NOTE: heap->free == 0 */
+
+ if (p->free && p->next->free) {
+ struct mem_block *q = p->next;
+
+ assert(p->ofs + p->size == q->ofs);
+ p->size += q->size;
+
+ p->next = q->next;
+ q->next->prev = p;
+
+ q->next_free->prev_free = q->prev_free;
+ q->prev_free->next_free = q->next_free;
+
+ free(q);
+ return 1;
+ }
+ return 0;
+}
+
+int
+mmFreeMem(struct mem_block *b)
+{
+ if (!b)
+ return 0;
+
+ if (b->free) {
+ drmMsg("block already free\n");
+ return -1;
+ }
+ if (b->reserved) {
+ drmMsg("block is reserved\n");
+ return -1;
+ }
+
+ b->free = 1;
+ b->next_free = b->heap->next_free;
+ b->prev_free = b->heap;
+ b->next_free->prev_free = b;
+ b->prev_free->next_free = b;
+
+ Join2Blocks(b);
+ if (b->prev != b->heap)
+ Join2Blocks(b->prev);
+
+ return 0;
+}
+
+
+void
+mmDestroy(struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap)
+ return;
+
+ for (p = heap->next; p != heap; ) {
+ struct mem_block *next = p->next;
+ free(p);
+ p = next;
+ }
+
+ free(heap);
+}
diff --git a/libdrm/intel/mm.h b/libdrm/intel/mm.h
new file mode 100644
index 00000000..49e3eecc
--- /dev/null
+++ b/libdrm/intel/mm.h
@@ -0,0 +1,96 @@
+/*
+ * GLX Hardware Device Driver common code
+ * Copyright (C) 1999 Wittawat Yamwong
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * Memory manager code. Primarily used by device drivers to manage texture
+ * heaps, etc.
+ */
+
+
+#ifndef MM_H
+#define MM_H
+
+struct mem_block {
+ struct mem_block *next, *prev;
+ struct mem_block *next_free, *prev_free;
+ struct mem_block *heap;
+ int ofs,size;
+ unsigned int free:1;
+ unsigned int reserved:1;
+};
+
+/* Rename the variables in the drm copy of this code so that it doesn't
+ * conflict with mesa or whoever else has copied it around.
+ */
+#define mmInit drm_mmInit
+#define mmAllocMem drm_mmAllocMem
+#define mmFreeMem drm_mmFreeMem
+#define mmFindBlock drm_mmFindBlock
+#define mmDestroy drm_mmDestroy
+#define mmDumpMemInfo drm_mmDumpMemInfo
+
+/**
+ * input: ofs = start offset of the heap, size = total size in bytes
+ * return: a heap pointer if OK, NULL if error
+ */
+extern struct mem_block *mmInit(int ofs, int size);
+
+/**
+ * Allocate 'size' bytes with 2^align2 bytes alignment, restricting the
+ * search to free memory after 'startSearch'. (For example, depth and back
+ * buffers should be placed in different 4mb banks to get better page hits
+ * where possible.)
+ * input: size = size of block
+ *        align2 = 2^align2 bytes alignment
+ *        startSearch = linear offset from start of heap to begin search
+ * return: pointer to the allocated block, 0 if error
+ */
+extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
+ int align2, int startSearch);
+
+/**
+ * Free a previously allocated block
+ * input: pointer to a block
+ * return: 0 if OK, -1 if error
+ */
+extern int mmFreeMem(struct mem_block *b);
+
+/**
+ * Find the block that starts at a given offset
+ * input: pointer to a heap, start offset
+ * return: pointer to the block, NULL if not found
+ */
+extern struct mem_block *mmFindBlock(struct mem_block *heap, int start);
+
+/**
+ * Destroy the memory manager and free all of its blocks
+ */
+extern void mmDestroy(struct mem_block *heap);
+
+/**
+ * For debugging purposes.
+ */
+extern void mmDumpMemInfo(const struct mem_block *heap);
+
+#endif
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 28f481ff..edb0c90b 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -110,7 +110,7 @@ static int drmDebugPrint(const char *format, va_list ap)
static int (*drm_debug_print)(const char *format, va_list ap) = drmDebugPrint;
-static void
+void
drmMsg(const char *format, ...)
{
va_list ap;
@@ -171,6 +171,19 @@ static char *drmStrdup(const char *s)
return retval;
}
+/**
+ * Call ioctl, restarting if it is interrupted
+ */
+static int
+drmIoctl(int fd, int request, void *arg)
+{
+ int ret;
+
+ do {
+ ret = ioctl(fd, request, arg);
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+ return ret;
+}
static unsigned long drmGetKeyFromFd(int fd)
{
@@ -668,7 +681,7 @@ drmVersionPtr drmGetVersion(int fd)
version->desc_len = 0;
version->desc = NULL;
- if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+ if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
drmFreeKernelVersion(version);
return NULL;
}
@@ -680,7 +693,7 @@ drmVersionPtr drmGetVersion(int fd)
if (version->desc_len)
version->desc = drmMalloc(version->desc_len + 1);
- if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+ if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno));
drmFreeKernelVersion(version);
return NULL;
@@ -766,10 +779,10 @@ char *drmGetBusid(int fd)
u.unique_len = 0;
u.unique = NULL;
- if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+ if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
return NULL;
u.unique = drmMalloc(u.unique_len + 1);
- if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+ if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
return NULL;
u.unique[u.unique_len] = '\0';
@@ -796,7 +809,7 @@ int drmSetBusid(int fd, const char *busid)
u.unique = (char *)busid;
u.unique_len = strlen(busid);
- if (ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
+ if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
return -errno;
}
return 0;
@@ -807,7 +820,7 @@ int drmGetMagic(int fd, drm_magic_t * magic)
drm_auth_t auth;
*magic = 0;
- if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
+ if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
return -errno;
*magic = auth.magic;
return 0;
@@ -818,7 +831,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
drm_auth_t auth;
auth.magic = magic;
- if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
+ if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
return -errno;
return 0;
}
@@ -883,7 +896,7 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
map.handle = 0;
map.type = type;
map.flags = flags;
- if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map))
+ if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
return -errno;
if (handle)
*handle = (drm_handle_t)map.handle;
@@ -896,7 +909,7 @@ int drmRmMap(int fd, drm_handle_t handle)
map.handle = (void *)handle;
- if(ioctl(fd, DRM_IOCTL_RM_MAP, &map))
+ if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map))
return -errno;
return 0;
}
@@ -929,7 +942,7 @@ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
request.flags = flags;
request.agp_start = agp_offset;
- if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request))
+ if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request))
return -errno;
return request.count;
}
@@ -942,7 +955,7 @@ int drmMarkBufs(int fd, double low, double high)
info.count = 0;
info.list = NULL;
- if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
return -EINVAL;
if (!info.count)
@@ -951,7 +964,7 @@ int drmMarkBufs(int fd, double low, double high)
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
return -ENOMEM;
- if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
int retval = -errno;
drmFree(info.list);
return retval;
@@ -960,7 +973,7 @@ int drmMarkBufs(int fd, double low, double high)
for (i = 0; i < info.count; i++) {
info.list[i].low_mark = low * info.list[i].count;
info.list[i].high_mark = high * info.list[i].count;
- if (ioctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
+ if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {
int retval = -errno;
drmFree(info.list);
return retval;
@@ -992,7 +1005,7 @@ int drmFreeBufs(int fd, int count, int *list)
request.count = count;
request.list = list;
- if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request))
+ if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request))
return -errno;
return 0;
}
@@ -1081,14 +1094,14 @@ drmBufInfoPtr drmGetBufInfo(int fd)
info.count = 0;
info.list = NULL;
- if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
return NULL;
if (info.count) {
if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
return NULL;
- if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
+ if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {
drmFree(info.list);
return NULL;
}
@@ -1132,7 +1145,7 @@ drmBufMapPtr drmMapBufs(int fd)
bufs.count = 0;
bufs.list = NULL;
bufs.virtual = NULL;
- if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
+ if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))
return NULL;
if (!bufs.count)
@@ -1141,7 +1154,7 @@ drmBufMapPtr drmMapBufs(int fd)
if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list))))
return NULL;
- if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
+ if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {
drmFree(bufs.list);
return NULL;
}
@@ -1256,7 +1269,7 @@ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags)
if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
- while (ioctl(fd, DRM_IOCTL_LOCK, &lock))
+ while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock))
;
return 0;
}
@@ -1279,7 +1292,7 @@ int drmUnlock(int fd, drm_context_t context)
lock.context = context;
lock.flags = 0;
- return ioctl(fd, DRM_IOCTL_UNLOCK, &lock);
+ return drmIoctl(fd, DRM_IOCTL_UNLOCK, &lock);
}
drm_context_t *drmGetReservedContextList(int fd, int *count)
@@ -1291,7 +1304,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
res.count = 0;
res.contexts = NULL;
- if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
+ if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
return NULL;
if (!res.count)
@@ -1305,7 +1318,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)
}
res.contexts = list;
- if (ioctl(fd, DRM_IOCTL_RES_CTX, &res))
+ if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))
return NULL;
for (i = 0; i < res.count; i++)
@@ -1344,7 +1357,7 @@ int drmCreateContext(int fd, drm_context_t *handle)
drm_ctx_t ctx;
ctx.flags = 0; /* Modified with functions below */
- if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
return -errno;
*handle = ctx.handle;
return 0;
@@ -1355,7 +1368,7 @@ int drmSwitchToContext(int fd, drm_context_t context)
drm_ctx_t ctx;
ctx.handle = context;
- if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))
return -errno;
return 0;
}
@@ -1376,7 +1389,7 @@ int drmSetContextFlags(int fd, drm_context_t context, drm_context_tFlags flags)
ctx.flags |= _DRM_CONTEXT_PRESERVED;
if (flags & DRM_CONTEXT_2DONLY)
ctx.flags |= _DRM_CONTEXT_2DONLY;
- if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx))
return -errno;
return 0;
}
@@ -1387,7 +1400,7 @@ int drmGetContextFlags(int fd, drm_context_t context,
drm_ctx_t ctx;
ctx.handle = context;
- if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx))
return -errno;
*flags = 0;
if (ctx.flags & _DRM_CONTEXT_PRESERVED)
@@ -1418,7 +1431,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
{
drm_ctx_t ctx;
ctx.handle = handle;
- if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx))
+ if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx))
return -errno;
return 0;
}
@@ -1426,7 +1439,7 @@ int drmDestroyContext(int fd, drm_context_t handle)
int drmCreateDrawable(int fd, drm_drawable_t *handle)
{
drm_draw_t draw;
- if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
+ if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw))
return -errno;
*handle = draw.handle;
return 0;
@@ -1436,7 +1449,7 @@ int drmDestroyDrawable(int fd, drm_drawable_t handle)
{
drm_draw_t draw;
draw.handle = handle;
- if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw))
+ if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw))
return -errno;
return 0;
}
@@ -1452,7 +1465,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
update.num = num;
update.data = (unsigned long long)(unsigned long)data;
- if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
+ if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))
return -errno;
return 0;
@@ -1472,7 +1485,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
*/
int drmAgpAcquire(int fd)
{
- if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))
return -errno;
return 0;
}
@@ -1490,7 +1503,7 @@ int drmAgpAcquire(int fd)
*/
int drmAgpRelease(int fd)
{
- if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))
return -errno;
return 0;
}
@@ -1513,7 +1526,7 @@ int drmAgpEnable(int fd, unsigned long mode)
drm_agp_mode_t m;
m.mode = mode;
- if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m))
return -errno;
return 0;
}
@@ -1544,7 +1557,7 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type,
b.size = size;
b.handle = 0;
b.type = type;
- if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b))
return -errno;
if (address != 0UL)
*address = b.physical;
@@ -1571,7 +1584,7 @@ int drmAgpFree(int fd, drm_handle_t handle)
b.size = 0;
b.handle = handle;
- if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b))
return -errno;
return 0;
}
@@ -1596,7 +1609,7 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)
b.handle = handle;
b.offset = offset;
- if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b))
return -errno;
return 0;
}
@@ -1620,7 +1633,7 @@ int drmAgpUnbind(int fd, drm_handle_t handle)
b.handle = handle;
b.offset = 0;
- if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b))
return -errno;
return 0;
}
@@ -1641,7 +1654,7 @@ int drmAgpVersionMajor(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return -errno;
return i.agp_version_major;
}
@@ -1662,7 +1675,7 @@ int drmAgpVersionMinor(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return -errno;
return i.agp_version_minor;
}
@@ -1683,7 +1696,7 @@ unsigned long drmAgpGetMode(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.mode;
}
@@ -1704,7 +1717,7 @@ unsigned long drmAgpBase(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.aperture_base;
}
@@ -1725,7 +1738,7 @@ unsigned long drmAgpSize(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.aperture_size;
}
@@ -1746,7 +1759,7 @@ unsigned long drmAgpMemoryUsed(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.memory_used;
}
@@ -1767,7 +1780,7 @@ unsigned long drmAgpMemoryAvail(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.memory_allowed;
}
@@ -1788,7 +1801,7 @@ unsigned int drmAgpVendorId(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.id_vendor;
}
@@ -1809,7 +1822,7 @@ unsigned int drmAgpDeviceId(int fd)
{
drm_agp_info_t i;
- if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i))
+ if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))
return 0;
return i.id_device;
}
@@ -1821,7 +1834,7 @@ int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)
*handle = 0;
sg.size = size;
sg.handle = 0;
- if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
+ if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg))
return -errno;
*handle = sg.handle;
return 0;
@@ -1833,7 +1846,7 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)
sg.size = 0;
sg.handle = handle;
- if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg))
+ if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg))
return -errno;
return 0;
}
@@ -1854,7 +1867,7 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)
int ret;
do {
- ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
+ ret = drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);
vbl->request.type &= ~DRM_VBLANK_RELATIVE;
} while (ret && errno == EINTR);
@@ -1904,7 +1917,7 @@ int drmCtlInstHandler(int fd, int irq)
ctl.func = DRM_INST_HANDLER;
ctl.irq = irq;
- if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
+ if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
return -errno;
return 0;
}
@@ -1927,7 +1940,7 @@ int drmCtlUninstHandler(int fd)
ctl.func = DRM_UNINST_HANDLER;
ctl.irq = 0;
- if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl))
+ if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))
return -errno;
return 0;
}
@@ -1944,7 +1957,7 @@ int drmFinish(int fd, int context, drmLockFlags flags)
if (flags & DRM_LOCK_FLUSH_ALL) lock.flags |= _DRM_LOCK_FLUSH_ALL;
if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;
if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES;
- if (ioctl(fd, DRM_IOCTL_FINISH, &lock))
+ if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock))
return -errno;
return 0;
}
@@ -1970,7 +1983,7 @@ int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)
p.busnum = busnum;
p.devnum = devnum;
p.funcnum = funcnum;
- if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
+ if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p))
return -errno;
return p.irq;
}
@@ -2012,7 +2025,7 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
map.ctx_id = ctx_id;
map.handle = (void *)handle;
- if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
+ if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
return -errno;
return 0;
}
@@ -2024,7 +2037,7 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
map.ctx_id = ctx_id;
- if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
+ if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
return -errno;
if (handle)
*handle = (drm_handle_t)map.handle;
@@ -2039,7 +2052,7 @@ int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,
drm_map_t map;
map.offset = idx;
- if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
+ if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map))
return -errno;
*offset = map.offset;
*size = map.size;
@@ -2056,7 +2069,7 @@ int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,
drm_client_t client;
client.idx = idx;
- if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client))
+ if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client))
return -errno;
*auth = client.auth;
*pid = client.pid;
@@ -2071,7 +2084,7 @@ int drmGetStats(int fd, drmStatsT *stats)
drm_stats_t s;
int i;
- if (ioctl(fd, DRM_IOCTL_GET_STATS, &s))
+ if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s))
return -errno;
stats->count = 0;
@@ -2213,7 +2226,7 @@ int drmSetInterfaceVersion(int fd, drmSetVersion *version)
sv.drm_dd_major = version->drm_dd_major;
sv.drm_dd_minor = version->drm_dd_minor;
- if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
+ if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {
retcode = -errno;
}
@@ -2244,7 +2257,7 @@ int drmCommandNone(int fd, unsigned long drmCommandIndex)
request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex);
- if (ioctl(fd, request, data)) {
+ if (drmIoctl(fd, request, data)) {
return -errno;
}
return 0;
@@ -2273,7 +2286,7 @@ int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,
request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE,
DRM_COMMAND_BASE + drmCommandIndex, size);
- if (ioctl(fd, request, data)) {
+ if (drmIoctl(fd, request, data)) {
return -errno;
}
return 0;
@@ -2302,7 +2315,7 @@ int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,
request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE,
DRM_COMMAND_BASE + drmCommandIndex, size);
- if (ioctl(fd, request, data)) {
+ if (drmIoctl(fd, request, data)) {
return -errno;
}
return 0;
@@ -2331,9 +2344,8 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,
request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE,
DRM_COMMAND_BASE + drmCommandIndex, size);
- if (ioctl(fd, request, data)) {
+ if (drmIoctl(fd, request, data))
return -errno;
- }
return 0;
}
@@ -2355,7 +2367,7 @@ int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type,
arg.type = type;
arg.fence_class = fence_class;
- if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
return -errno;
fence->handle = arg.handle;
fence->fence_class = arg.fence_class;
@@ -2379,7 +2391,7 @@ int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fenc
arg.flags = flags;
arg.fence_class = fence_class;
- if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
return -errno;
fence->handle = arg.handle;
fence->fence_class = arg.fence_class;
@@ -2397,7 +2409,7 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = handle;
- if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
return -errno;
fence->handle = arg.handle;
fence->fence_class = arg.fence_class;
@@ -2414,7 +2426,7 @@ int drmFenceUnreference(int fd, const drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
return -errno;
return 0;
}
@@ -2427,7 +2439,7 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
arg.handle = fence->handle;
arg.type = flush_type;
- if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
return -errno;
fence->fence_class = arg.fence_class;
fence->type = arg.type;
@@ -2442,7 +2454,7 @@ int drmFenceUpdate(int fd, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
return -errno;
fence->fence_class = arg.fence_class;
fence->type = arg.type;
@@ -2482,7 +2494,7 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
arg.handle = fence->handle;
arg.type = emit_type;
- if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
return -errno;
fence->fence_class = arg.fence_class;
fence->type = arg.type;
@@ -2520,7 +2532,7 @@ drmIoctlTimeout(int fd, unsigned long request, void *argp)
int ret;
do {
- ret = ioctl(fd, request, argp);
+ ret = drmIoctl(fd, request, argp);
if (ret != 0 && errno == EAGAIN) {
if (!haveThen) {
gettimeofday(&then, NULL);
@@ -2630,7 +2642,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
memset(&arg, 0, sizeof(arg));
req->handle = handle;
- if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
return -errno;
drmBOCopyReply(rep, buf);
@@ -2654,7 +2666,7 @@ int drmBOUnreference(int fd, drmBO *buf)
memset(&arg, 0, sizeof(arg));
arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
return -errno;
buf->handle = 0;
@@ -2724,7 +2736,7 @@ int drmBOUnmap(int fd, drmBO *buf)
memset(&arg, 0, sizeof(arg));
arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
+ if (drmIoctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
return -errno;
}
buf->mapCount--;
@@ -2770,7 +2782,7 @@ int drmBOInfo(int fd, drmBO *buf)
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
- ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
+ ret = drmIoctl(fd, DRM_IOCTL_BO_INFO, &arg);
if (ret)
return -errno;
@@ -2825,7 +2837,7 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
arg.p_size = pSize;
arg.mem_type = memType;
- if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
return 0;
}
@@ -2837,7 +2849,7 @@ int drmMMTakedown(int fd, unsigned memType)
memset(&arg, 0, sizeof(arg));
arg.mem_type = memType;
- if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
return -errno;
return 0;
}
@@ -2879,7 +2891,7 @@ int drmMMInfo(int fd, unsigned memType, uint64_t *size)
arg.mem_type = memType;
- if (ioctl(fd, DRM_IOCTL_MM_INFO, &arg))
+ if (drmIoctl(fd, DRM_IOCTL_MM_INFO, &arg))
return -errno;
*size = arg.p_size;
@@ -2894,7 +2906,7 @@ int drmBOVersion(int fd, unsigned int *major,
int ret;
memset(&arg, 0, sizeof(arg));
- ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
+ ret = drmIoctl(fd, DRM_IOCTL_BO_VERSION, &arg);
if (ret)
return -errno;
diff --git a/libdrm/xf86drm.h b/libdrm/xf86drm.h
index 230f54ce..6ced284a 100644
--- a/libdrm/xf86drm.h
+++ b/libdrm/xf86drm.h
@@ -657,6 +657,7 @@ extern int drmSLLookupNeighbors(void *l, unsigned long key,
extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
extern void drmCloseOnce(int fd);
+extern void drmMsg(const char *format, ...);
#include "xf86mm.h"
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index bb573407..a31de424 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -94,6 +94,18 @@ typedef struct _drmMMListHead
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
+#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
+
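+/* Iterate forwards (or, below, backwards) over a list; safe against the
+ * current item being unlinked during iteration.
+ */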
+#define DRMLISTFOREACHSAFE(__item, __temp, __list) \
+ for ((__item) = (__list)->next, (__temp) = (__item)->next; \
+ (__item) != (__list); \
+ (__item) = (__temp), (__temp) = (__item)->next)
+
+#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list) \
+ for ((__item) = (__list)->prev, (__temp) = (__item)->prev; \
+ (__item) != (__list); \
+ (__item) = (__temp), (__temp) = (__item)->prev)
+
typedef struct _drmFence
{
unsigned handle;
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 3af6f370..55dfb77c 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -116,7 +116,7 @@ V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \
ifeq ($(V),"$(RUNNING_REL)")
HEADERFROMBOOT := 1
-GETCONFIG := MAKEFILES=$(shell pwd)/.config
+GETCONFIG := MAKEFILES=$(shell /bin/pwd)/.config
HAVECONFIG := y
endif
@@ -163,7 +163,7 @@ endif
all: modules
modules: includes
- +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+ +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules
ifeq ($(HEADERFROMBOOT),1)
@@ -239,7 +239,7 @@ drmstat: drmstat.c
$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
install:
- make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
+ make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules_install
else
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 45a6b1f9..f338b598 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -12,16 +12,17 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
- drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
+ drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
- drm_regman.o drm_vm_nopage_compat.o
+ drm_regman.o drm_vm_nopage_compat.o drm_gem.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
- i915_opregion.o
+ i915_opregion.o \
+ i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
diff --git a/linux-core/drm-gem.txt b/linux-core/drm-gem.txt
new file mode 100644
index 00000000..5cda87f8
--- /dev/null
+++ b/linux-core/drm-gem.txt
@@ -0,0 +1,805 @@
+ The Graphics Execution Manager
+ Part of the Direct Rendering Manager
+ ==============================
+
+ Keith Packard <keithp@keithp.com>
+ Eric Anholt <eric@anholt.net>
+ 2008-5-9
+
+Contents:
+
+ 1. GEM Overview
+ 2. API overview and conventions
+ 3. Object Creation/Destruction
+ 4. Reading/writing contents
+ 5. Mapping objects to userspace
+ 6. Memory Domains
+ 7. Execution (Intel specific)
+ 8. Other misc Intel-specific functions
+
+1. Graphics Execution Manager Overview
+
+Gem is designed to manage graphics memory, control access to the graphics
+device execution context and handle the essentially NUMA environment unique
+to modern graphics hardware. Gem allows multiple applications to share
+graphics device resources without the need to constantly reload the entire
+graphics card. Data may be shared between multiple applications with gem
+ensuring that the correct memory synchronization occurs.
+
+Graphics data can consume arbitrary amounts of memory, with 3D applications
+constructing ever larger sets of textures and vertices. With graphics card
+memory space growing larger every year, and graphics APIs growing more
+complex, we can no longer insist that each application save a complete copy
+of their graphics state so that the card can be re-initialized from user
+space at each context switch. Ensuring that graphics data remains persistent
+across context switches allows applications significant new functionality
+while also improving performance for existing APIs.
+
+Modern Linux desktops include significant 3D rendering as a fundamental
+component of the desktop image construction process. 2D and 3D applications
+paint their content to offscreen storage and the central 'compositing
+manager' constructs the final screen image from those window contents. This
+means that pixel image data from these applications must move within reach
+of the compositing manager and be used as source operands for screen image
+rendering operations.
+
+Gem provides simple mechanisms to manage graphics data and control execution
+flow within the Linux operating system. Using many existing kernel
+subsystems, it does this with a modest amount of code.
+
+2. API Overview and Conventions
+
+All APIs here are defined in terms of ioctls applied to the DRM file
+descriptor. To create and manipulate objects, an application must be
+'authorized' using the DRI or DRI2 protocols with the X server. To relax
+that, we will need to implement some better access control mechanisms within
+the hardware portion of the driver to prevent inappropriate
+cross-application data access.
+
+Any DRM driver which does not support GEM will return -ENODEV for all of
+these ioctls. Invalid object handles return -EINVAL. Invalid object names
+return -ENOENT. Other errors are as documented in the specific API below.
+
+To avoid the need to translate ioctl contents on mixed-size systems (with
+32-bit user space running on a 64-bit kernel), the ioctl data structures
+contain explicitly sized objects, using 64-bits for all size and pointer
+data and 32-bits for identifiers. In addition, the 64-bit objects are all
+carefully aligned on 64-bit boundaries. Because of this, all pointers in the
+ioctl data structures are passed as uint64_t values. Suitable casts will
+be necessary.
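+
+For example (illustrative only; 'args' and 'ptr' stand in for whatever
+structure and user space pointer are in play), a pointer destined for one
+of these 64-bit fields would be passed as:
+
+	args.data_ptr = (uint64_t) (uintptr_t) ptr;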
+
+One significant operation which is explicitly left out of this API is object
+locking. Applications are expected to perform locking of shared objects
+outside of the GEM api. This kind of locking is not necessary to safely
+manipulate the graphics engine, and with multiple objects interacting in
+unknown ways, per-object locking would likely introduce all kinds of
+lock-order issues. Punting this to the application seems like the only
+sensible plan. Given that DRM already offers a global lock on the hardware,
+this doesn't change the current situation.
+
+3. Object Creation and Destruction
+
+Gem provides explicit memory management primitives. System pages are
+allocated when the object is created, either as the fundamental storage for
+hardware where system memory is used by the graphics processor directly, or
+as backing store for graphics-processor resident memory.
+
+Objects are referenced from user space using handles. These are, for all
+intents and purposes, equivalent to file descriptors. We could simply use
+file descriptors were it not for the small limit (1024) of file descriptors
+available to applications, and for the fact that the X server (a rather
+significant user of this API) uses 'select' and has a limited maximum file
+descriptor for that operation. Given the ability to allocate more file
+descriptors, and given the ability to place these 'higher' in the file
+descriptor space, we'd love to simply use file descriptors.
+
+Objects may be published with a name so that other applications can access
+them. The name remains valid as long as the object exists. Right now, our
+DRI APIs use 32-bit integer names, so that's what we expose here.
+
+ A. Creation
+
+ struct drm_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object
+ * will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+ /* usage */
+ create.size = 16384;
+ ret = ioctl (fd, DRM_IOCTL_GEM_CREATE, &create);
+ if (ret == 0)
+ return create.handle;
+
+ Note that the size is rounded up to a page boundary, and that
+ the rounded-up size is returned in 'size'. No name is assigned to
+ this object, making it local to this process.
+
+	If insufficient memory is available, -ENOMEM will be returned.
+
+ B. Closing
+
+ struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+
+ /* usage */
+ close.handle = <handle>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_CLOSE, &close);
+
+ This call makes the specified handle invalid, and if no other
+ applications are using the object, any necessary graphics hardware
+ synchronization is performed and the resources used by the object
+	are released.
+
+ C. Naming
+
+ struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+
+ /** Returned global name */
+ uint32_t name;
+ };
+
+ /* usage */
+ flink.handle = <handle>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (ret == 0)
+ return flink.name;
+
+ Flink creates a name for the object and returns it to the
+ application. This name can be used by other applications to gain
+ access to the same object.
+
+ D. Opening by name
+
+ struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+
+ /** Returned handle for the object */
+ uint32_t handle;
+
+ /** Returned size of the object */
+ uint64_t size;
+ };
+
+ /* usage */
+ open.name = <name>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_OPEN, &open);
+ if (ret == 0) {
+ *sizep = open.size;
+ return open.handle;
+ }
+
+ Open accesses an existing object and returns a handle for it. If the
+ object doesn't exist, -ENOENT is returned. The size of the object is
+ also returned. This handle has all the same capabilities as the
+ handle used to create the object. In particular, the object is not
+ destroyed until all handles are closed.
+
+4. Basic read/write operations
+
+By default, gem objects are not mapped to the application's address space;
+getting data in and out of them is done with I/O operations instead. This
+allows the data to reside in otherwise unmapped pages, including pages in
+video memory on an attached discrete graphics card. In addition, using
+explicit I/O operations allows better control over cache contents: because
+graphics devices are generally not cache coherent with the CPU, mapping
+pages used for graphics into an application address space requires the use
+of expensive cache flushing operations. Providing direct control over
+graphics data access ensures that data are handled in the most efficient
+possible fashion.
+
+ A. Reading
+
+ struct drm_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void * */
+ };
+
+	This copies data out of the specified object, starting at the
+	specified position, into user memory. Any necessary graphics
+	device synchronization and flushing will be done automatically.
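+
+	A usage sketch, assuming an ioctl name that follows the
+	DRM_IOCTL_GEM_* convention of the examples above:
+
+	pread.handle = <handle>;
+	pread.offset = 0;
+	pread.size = <size>;
+	pread.data_ptr = (uint64_t) (uintptr_t) buffer;
+	ret = ioctl (fd, DRM_IOCTL_GEM_PREAD, &pread);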
+
+    B. Writing
+
+	struct drm_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void * */
+ };
+
+	This copies data from user memory into the specified object at
+	the specified position. Again, device synchronization will
+	be handled by the kernel to ensure user space sees a
+	consistent view of the graphics device.
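+
+	A usage sketch, again assuming an ioctl name following the
+	convention of the examples above:
+
+	pwrite.handle = <handle>;
+	pwrite.offset = 0;
+	pwrite.size = <size>;
+	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
+	ret = ioctl (fd, DRM_IOCTL_GEM_PWRITE, &pwrite);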
+
+5. Mapping objects to user space
+
+For most objects, reading/writing is the preferred interaction mode.
+However, when the CPU is involved in rendering to cover deficiencies in
+hardware support for particular operations, it will want to directly
+access the relevant objects.
+
+Because mmap is fairly heavyweight, we allow applications to retain maps to
+objects persistently and then update how they're using the memory through a
+separate interface. Applications which fail to use this separate interface
+may exhibit unpredictable behavior, as memory consistency will not be
+preserved.
+
+ A. Mapping
+
+ struct drm_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+		/** Returned pointer where the data was mapped */
+ uint64_t addr_ptr; /* void * */
+ };
+
+ /* usage */
+ mmap.handle = <handle>;
+ mmap.offset = <offset>;
+ mmap.size = <size>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_MMAP, &mmap);
+ if (ret == 0)
+ return (void *) (uintptr_t) mmap.addr_ptr;
+
+
+ B. Unmapping
+
+ munmap (addr, length);
+
+ Nothing strange here, just use the normal munmap syscall.
+
+6. Memory Domains
+
+Graphics devices remain a strong bastion of non cache-coherent memory. As a
+result, accessing data through one functional unit loads that unit's
+cache with data which then needs to be manually synchronized when that
+data is used with another functional unit.
+
+Tracking where data are resident is done by identifying how functional units
+deal with caches. Each cache is labeled as a separate memory domain. Then,
+each sequence of operations is expected to load data into various read
+domains and leave data in at most one write domain. Gem tracks the read and
+write memory domains of each object and performs the necessary
+synchronization operations when objects move from one domain set to another.
+
+For example, if operation 'A' constructs an image that is immediately used
+by operation 'B', and the read domain for 'B' is not the same as the
+write domain for 'A', then the write domain must be flushed and the read
+domain invalidated. If these two operations are both executed in the same
+command queue, then the flush operation can go in between them in the same
+queue, avoiding any kind of CPU-based synchronization and leaving the GPU to
+do the work itself.
+
+6.1 Memory Domains (GPU-independent)
+
+ * DRM_GEM_DOMAIN_CPU.
+
+ Objects in this domain are using caches which are connected to the CPU.
+ Moving objects from non-CPU domains into the CPU domain can involve waiting
+ for the GPU to finish with operations using this object. Moving objects
+ from this domain to a GPU domain can involve flushing CPU caches and chipset
+ buffers.
+
+6.2 GPU-independent memory domain ioctl
+
+This ioctl is independent of the GPU in use. So far, no use other than
+synchronizing objects to the CPU domain has been found; if that turns out
+to be generally true, this ioctl may be simplified further.
+
+ A. Explicit domain control
+
+ struct drm_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+ };
+
+ /* usage */
+ set_domain.handle = <handle>;
+ set_domain.read_domains = <read_domains>;
+ set_domain.write_domain = <write_domain>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+
+ When the application wants to explicitly manage memory domains for
+ an object, it can use this function. Usually, this is only used
+ when the application wants to synchronize object contents between
+ the GPU and CPU-based application rendering. In that case,
+ the <read_domains> would be set to DRM_GEM_DOMAIN_CPU, and if the
+ application were going to write to the object, the <write_domain>
+ would also be set to DRM_GEM_DOMAIN_CPU. After the call, gem
+ guarantees that all previous rendering operations involving this
+ object are complete. The application is then free to access the
+ object through the address returned by the mmap call. Afterwards,
+ when the application again uses the object through the GPU, any
+ necessary CPU flushing will occur and the object will be correctly
+ synchronized with the GPU.
+
+ Note that this synchronization is not required for any accesses
+ going through the driver itself. The pread, pwrite and execbuffer
+ ioctls all perform the necessary domain management internally.
+ Explicit synchronization is only necessary when accessing the object
+ through the mmap'd address.
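+
+	For example, a sketch of synchronizing and then writing through a
+	mapping obtained from the section 5 mmap ioctl:
+
+	set_domain.handle = <handle>;
+	set_domain.read_domains = DRM_GEM_DOMAIN_CPU;
+	set_domain.write_domain = DRM_GEM_DOMAIN_CPU;
+	ret = ioctl (fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+	if (ret == 0)
+		memcpy (addr, data, size);	/* addr from the mmap call */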
+
+7. Execution (Intel specific)
+
+Managing the command buffers is inherently chip-specific, so the core of gem
+doesn't have any intrinsic functions. Rather, execution is left to the
+device-specific portions of the driver.
+
+The Intel DRM_I915_GEM_EXECBUFFER ioctl takes a list of gem objects, all of
+which are mapped to the graphics device. The last object in the list is the
+command buffer.
+
+7.1. Relocations
+
+Command buffers often refer to other objects, and to allow the kernel driver
+to move objects around, a sequence of relocations is associated with each
+object. Device-specific relocation operations are used to place the
+target-object relative value into the object.
+
+The Intel driver has a single relocation type:
+
+ struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this
+ * relocation entry.
+ *
+ * It's appealing to make this be an index into the
+ * mm_validate_entry list to refer to the buffer,
+ * but this allows the driver to create a relocation
+ * list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target
+ * buffer to make up the relocation entry.
+ */
+ uint32_t delta;
+
+ /**
+ * Offset in the buffer the relocation entry will be
+ * written into
+ */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the
+ * relocation entry was last written as.
+ *
+ * If the buffer has the same offset as last time, we
+ * can skip syncing and writing the relocation. This
+ * value is written back out by the execbuffer ioctl
+ * when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+		/**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the
+ * whole execbuffer operation, so that where there are
+ * conflicts, the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+ };
+
+	'target_handle' is the handle of the target object. This object
+	must be one of the objects listed in the execbuffer request, or
+	bad things will happen; the kernel doesn't check for this.
+
+ 'offset' is where, in the source object, the relocation data
+ are written. Each relocation value is a 32-bit value consisting
+ of the location of the target object in the GPU memory space plus
+ the 'delta' value included in the relocation.
+
+ 'presumed_offset' is where user-space believes the target object
+	lies in GPU memory space. If this value matches where the object
+	actually is, then no relocation data are written; the kernel
+	assumes that user space has set up data in the source object
+ using this presumption. This offers a fairly important optimization
+ as writing relocation data requires mapping of the source object
+ into the kernel memory space.
+
+	'read_domains' and 'write_domain' list the usage by the source
+	object of the target object. The kernel unions all of the domain
+	information from all relocations in the execbuffer request. No more
+	than one write_domain is allowed, otherwise -EINVAL is
+	returned. read_domains must contain write_domain. This domain
+ information is used to synchronize buffer contents as described
+ above in the section on domains.
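+
+	As a sketch, a relocation entry describing a command that samples
+	from a texture buffer (all values hypothetical) might be filled
+	in as:
+
+	reloc.target_handle = <texture handle>;
+	reloc.delta = 0;
+	reloc.offset = <byte offset of the pointer in the source object>;
+	reloc.presumed_offset = 0;
+	reloc.read_domains = DRM_GEM_DOMAIN_I915_SAMPLER;
+	reloc.write_domain = 0;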
+
+7.1.1 Memory Domains (Intel specific)
+
+The Intel GPU has several internal caches which are not coherent and hence
+require explicit synchronization. Memory domains provide the necessary data
+to synchronize what is needed while leaving other cache contents intact.
+
+ * DRM_GEM_DOMAIN_I915_RENDER.
+ The GPU 3D and 2D rendering operations use a unified rendering cache, so
+	operations doing 3D painting and 2D blits will use this domain.
+
+ * DRM_GEM_DOMAIN_I915_SAMPLER
+ Textures are loaded by the sampler through a separate cache, so
+ any texture reading will use this domain. Note that the sampler
+ and renderer use different caches, so moving an object from render target
+ to texture source will require a domain transfer.
+
+ * DRM_GEM_DOMAIN_I915_COMMAND
+ The command buffer doesn't have an explicit cache (although it does
+ read ahead quite a bit), so this domain just indicates that the object
+ needs to be flushed to the GPU.
+
+ * DRM_GEM_DOMAIN_I915_INSTRUCTION
+ All of the programs on Gen4 and later chips use an instruction cache to
+ speed program execution. It must be explicitly flushed when new programs
+ are written to memory by the CPU.
+
+ * DRM_GEM_DOMAIN_I915_VERTEX
+ Vertex data uses two different vertex caches, but they're
+ both flushed with the same instruction.
+
+7.2 Execution object list (Intel specific)
+
+ struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT
+ * for this operation.
+ */
+ uint32_t handle;
+
+ /**
+ * List of relocations to be performed on this buffer
+ */
+ uint32_t relocation_count;
+ /* struct drm_i915_gem_relocation_entry *relocs */
+ uint64_t relocs_ptr;
+
+ /**
+ * Required alignment in graphics aperture
+ */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object,
+ * for future presumed_offset writes.
+ */
+ uint64_t offset;
+ };
+
+ Each object involved in a particular execution operation must be
+ listed using one of these structures.
+
+ 'handle' references the object.
+
+	'relocs_ptr' is a user-mode pointer to an array of 'relocation_count'
+ drm_i915_gem_relocation_entry structs (see above) that
+ define the relocations necessary in this buffer. Note that all
+ relocations must reference other exec_object structures in the same
+ execbuffer ioctl and that those other buffers must come earlier in
+ the exec_object array. In other words, the dependencies mapped by the
+ exec_object relocations must form a directed acyclic graph.
+
+	'alignment' is the byte alignment necessary for this buffer. Each
+	object has specific alignment requirements; as the kernel doesn't
+	know what each object is being used for, those requirements must be
+	provided by user mode. If an object is used in two different ways,
+ it's quite possible that the alignment requirements will differ.
+
+ 'offset' is a return value, receiving the location of the object
+ during this execbuffer operation. The application should use this
+	as the presumed offset in future operations; if the object does not
+	move, then the kernel need not write relocation data.
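+
+	A sketch of filling in one entry (names hypothetical):
+
+	exec_objects[i].handle = <handle>;
+	exec_objects[i].relocation_count = <number of relocations>;
+	exec_objects[i].relocs_ptr = (uint64_t) (uintptr_t) relocs;
+	exec_objects[i].alignment = 4096;
+	/* exec_objects[i].offset is written back by the kernel */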
+
+7.3 Execbuffer ioctl (Intel specific)
+
+ struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their
+	 * relocations to be performed on them.
+ *
+ * These buffers must be listed in an order such that
+ * all relocations a buffer is performing refer to
+ * buffers that have already appeared in the validate
+ * list.
+ */
+ /* struct drm_i915_gem_validate_entry *buffers */
+ uint64_t buffers_ptr;
+ uint32_t buffer_count;
+
+ /**
+ * Offset in the batchbuffer to start execution from.
+ */
+ uint32_t batch_start_offset;
+
+ /**
+ * Bytes used in batchbuffer from batch_start_offset
+ */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+ };
+
+
+ 'buffers_ptr' is a user-mode pointer to an array of 'buffer_count'
+ drm_i915_gem_exec_object structures which contains the complete set
+ of objects required for this execbuffer operation. The last entry in
+ this array, the 'batch buffer', is the buffer of commands which will
+ be linked to the ring and executed.
+
+ 'batch_start_offset' is the byte offset within the batch buffer which
+ contains the first command to execute. So far, we haven't found a
+ reason to use anything other than '0' here, but the thought was that
+ some space might be allocated for additional initialization which
+ could be skipped in some cases. This must be a multiple of 4.
+
+ 'batch_len' is the length, in bytes, of the data to be executed
+ (i.e., the amount of data after batch_start_offset). This must
+ be a multiple of 4.
+
+ 'num_cliprects' and 'cliprects_ptr' reference an array of
+ drm_clip_rect structures that is num_cliprects long. The entire
+ batch buffer will be executed multiple times, once for each
+ rectangle in this list. If num_cliprects is 0, then no clipping
+ rectangle will be set.
+
+ 'DR1' and 'DR4' are portions of the 3DSTATE_DRAWING_RECTANGLE
+ command which will be queued when this operation is clipped
+ (num_cliprects != 0).
+
+ DR1 bit definition
+ 31 Fast Scissor Clip Disable (debug only).
+ Disables a hardware optimization that
+ improves performance. This should have
+ no visible effect, other than reducing
+			performance.
+
+ 30 Depth Buffer Coordinate Offset Disable.
+ This disables the addition of the
+ depth buffer offset bits which are used
+ to change the location of the depth buffer
+ relative to the front buffer.
+
+ 27:26 X Dither Offset. Specifies the X pixel
+ offset to use when accessing the dither table
+
+ 25:24 Y Dither Offset. Specifies the Y pixel
+ offset to use when accessing the dither
+ table.
+
+ DR4 bit definition
+ 31:16 Drawing Rectangle Origin Y. Specifies the Y
+ origin of coordinates relative to the
+ draw buffer.
+
+ 15:0 Drawing Rectangle Origin X. Specifies the X
+ origin of coordinates relative to the
+ draw buffer.
+
+ As you can see, these two fields are necessary for correctly
+ offsetting drawing within a buffer which contains multiple surfaces.
+ Note that DR1 is only used on Gen3 and earlier hardware and that
+ newer hardware sticks the dither offset elsewhere.
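+
+	A usage sketch for an unclipped execution, with the batch buffer
+	as the last entry in the exec_object array (the ioctl name follows
+	the DRM_IOCTL_I915_GEM_* convention used in section 8):
+
+	execbuffer.buffers_ptr = (uint64_t) (uintptr_t) exec_objects;
+	execbuffer.buffer_count = <count>;
+	execbuffer.batch_start_offset = 0;
+	execbuffer.batch_len = <bytes of commands, a multiple of 4>;
+	execbuffer.DR1 = 0;
+	execbuffer.DR4 = 0;
+	execbuffer.num_cliprects = 0;
+	execbuffer.cliprects_ptr = 0;
+	ret = ioctl (fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuffer);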
+
+7.3.1 Detailed Execution Description
+
+ Execution of a single batch buffer requires several preparatory
+ steps to make the objects visible to the graphics engine and resolve
+ relocations to account for their current addresses.
+
+ A. Mapping and Relocation
+
+ Each exec_object structure in the array is examined in turn.
+
+ If the object is not already bound to the GTT, it is assigned a
+ location in the graphics address space. If no space is available in
+ the GTT, some other object will be evicted. This may require waiting
+ for previous execbuffer requests to complete before that object can
+ be unmapped. With the location assigned, the pages for the object
+ are pinned in memory using find_or_create_page and the GTT entries
+ updated to point at the relevant pages using drm_agp_bind_pages.
+
+ Then the array of relocations is traversed. Each relocation record
+ looks up the target object and, if the presumed offset does not
+ match the current offset (remember that this buffer has already been
+ assigned an address as it must have been mapped earlier), the
+ relocation value is computed using the current offset. If the
+ object is currently in use by the graphics engine, writing the data
+	out must be preceded by a delay while the object is still busy.
+ Once it is idle, then the page containing the relocation is mapped
+ by the CPU and the updated relocation data written out.
+
+ The read_domains and write_domain entries in each relocation are
+ used to compute the new read_domains and write_domain values for the
+ target buffers. The actual execution of the domain changes must wait
+ until all of the exec_object entries have been evaluated as the
+ complete set of domain information will not be available until then.
+
+ B. Memory Domain Resolution
+
+ After all of the new memory domain data has been pulled out of the
+ relocations and computed for each object, the list of objects is
+ again traversed and the new memory domains compared against the
+ current memory domains. There are two basic operations involved here:
+
+ * Flushing the current write domain. If the new read domains
+ are not equal to the current write domain, then the current
+ write domain must be flushed. Otherwise, reads will not see data
+ present in the write domain cache. In addition, any new read domains
+ other than the current write domain must be invalidated to ensure
+ that the flushed data are re-read into their caches.
+
+	* Invalidating new read domains. Any domains not currently in use
+	  for this object must be invalidated, as old objects which were
+	  mapped at the same location may have stale data in the new
+	  domain caches.
+
+ If the CPU cache is being invalidated and some GPU cache is being
+ flushed, then we'll have to wait for rendering to complete so that
+ any pending GPU writes will be complete before we flush the GPU
+ cache.
+
+ If the CPU cache is being flushed, then we use 'clflush' to get data
+ written from the CPU.
+
+ Because the GPU caches cannot be partially flushed or invalidated,
+ we don't actually flush them during this traversal stage. Rather, we
+ gather the invalidate and flush bits up in the device structure.
+
+ Once all of the object domain changes have been evaluated, then the
+ gathered invalidate and flush bits are examined. For any GPU flush
+ operations, we emit a single MI_FLUSH command that performs all of
+ the necessary flushes. We then look to see if the CPU cache was
+ flushed. If so, we use the chipset flush magic (writing to a special
+ page) to get the data out of the chipset and into memory.
+
+ C. Queuing Batch Buffer to the Ring
+
+ With all of the objects resident in graphics memory space, and all
+ of the caches prepared with appropriate data, the batch buffer
+ object can be queued to the ring. If there are clip rectangles, then
+ the buffer is queued once per rectangle, with suitable clipping
+ inserted into the ring just before the batch buffer.
+
+ D. Creating an IRQ Cookie
+
+ Right after the batch buffer is placed in the ring, a request to
+ generate an IRQ is added to the ring along with a command to write a
+ marker into memory. When the IRQ fires, the driver can look at the
+ memory location to see where in the ring the GPU has passed. This
+ magic cookie value is stored in each object used in this execbuffer
+	command; it is used wherever you saw 'wait for rendering' above in
+ this document.
+
+ E. Writing back the new object offsets
+
+ So that the application has a better idea what to use for
+ 'presumed_offset' values later, the current object offsets are
+ written back to the exec_object structures.
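+
+	A sketch of the application-side bookkeeping ('last_offset' is a
+	hypothetical field in the application's own buffer tracking):
+
+	for (i = 0; i < buffer_count; i++)
+		buffers[i]->last_offset = exec_objects[i].offset;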
+
+
+8. Other miscellaneous Intel-specific functions
+
+To complete the driver, a few other functions were necessary.
+
+8.1 Initialization from the X server
+
+As the X server is currently responsible for apportioning memory between 2D
+and 3D, it must tell the kernel which region of the GTT aperture is
+available for 3D objects to be mapped into.
+
+ struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the
+ * DRM memory manager.
+ */
+ uint64_t gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM
+ * memory manager.
+ */
+ uint64_t gtt_end;
+ };
+ /* usage */
+ init.gtt_start = <gtt_start>;
+ init.gtt_end = <gtt_end>;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_INIT, &init);
+
+ The GTT aperture between gtt_start and gtt_end will be used to map
+ objects. This also tells the kernel that the ring can be used,
+ pulling the ring addresses from the device registers.
+
+8.2 Pinning objects in the GTT
+
+For scan-out buffers and the current shared depth and back buffers, we need
+to have them always available in the GTT, at least for now. Pinning locks
+their pages in memory and keeps the object at a fixed offset in the
+graphics aperture. These operations are available only to root.
+
+ struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ uint32_t handle;
+ uint32_t pad;
+
+ /** alignment required within the aperture */
+ uint64_t alignment;
+
+ /** Returned GTT offset of the buffer. */
+ uint64_t offset;
+ };
+
+ /* usage */
+ pin.handle = <handle>;
+ pin.alignment = <alignment>;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_PIN, &pin);
+ if (ret == 0)
+ return pin.offset;
+
+ Pinning an object ensures that it will not be evicted from the GTT
+ or moved. It will stay resident until destroyed or unpinned.
+
+ struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+ /* usage */
+ unpin.handle = <handle>;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
+
+ Unpinning an object makes it possible to evict this object from the
+ GTT. It doesn't ensure that it will be evicted, just that it may.
+
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 19168cd7..6b5e1851 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -54,6 +54,7 @@
#include <linux/smp_lock.h> /* For (un)lock_kernel */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
+#include <linux/kref.h>
#include <linux/pagemap.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
#include <linux/mutex.h>
@@ -89,6 +90,10 @@
struct drm_device;
struct drm_file;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+typedef unsigned long uintptr_t;
+#endif
+
/* If you want the memory alloc debug functionality, change define below */
/* #define DEBUG_MEMORY */
@@ -107,7 +112,7 @@ struct drm_file;
#define DRIVER_IRQ_SHARED 0x80
#define DRIVER_DMA_QUEUE 0x100
#define DRIVER_FB_DMA 0x200
-
+#define DRIVER_GEM 0x400
/*@}*/
@@ -427,6 +432,11 @@ struct drm_file {
struct list_head refd_objects;
+ /** Mapping of mm object handles to object pointers. */
+ struct idr object_idr;
+ /** Lock for synchronization of access to object_idr. */
+ spinlock_t table_lock;
+
struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
struct file *filp;
void *driver_priv;
@@ -464,6 +474,11 @@ struct drm_lock_data {
uint32_t kernel_waiters;
uint32_t user_waiters;
int idle_has_lock;
+ /**
+ * Boolean signaling that the lock is held on behalf of the
+ * file_priv client by the kernel in an ioctl handler.
+ */
+ int kernel_held;
};
/**
@@ -539,17 +554,17 @@ struct drm_sigdata {
* Generic memory manager structs
*/
-struct drm_mm_node {
+struct drm_memrange_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
- struct drm_mm *mm;
+ struct drm_memrange *mm;
void *private;
};
-struct drm_mm {
+struct drm_memrange {
struct list_head fl_entry;
struct list_head ml_entry;
};
@@ -563,7 +578,7 @@ struct drm_map_list {
struct drm_hash_item hash;
struct drm_map *map; /**< mapping */
uint64_t user_token;
- struct drm_mm_node *file_offset_node;
+ struct drm_memrange_node *file_offset_node;
};
typedef struct drm_map drm_local_map_t;
@@ -604,6 +619,56 @@ struct drm_ati_pcigart_info {
int table_size;
};
+/**
+ * This structure defines the gem memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
+struct drm_gem_object {
+ /** Reference count of this object */
+ struct kref refcount;
+
+ /** Handle count of this object. Each handle also holds a reference */
+ struct kref handlecount;
+
+ /** Related drm device */
+ struct drm_device *dev;
+
+ /** File representing the shmem storage */
+ struct file *filp;
+
+ /**
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /**
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /**
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ void *driver_private;
+};
+
#include "drm_objects.h"
/**
@@ -705,6 +770,18 @@ struct drm_driver {
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
+ int (*proc_init)(struct drm_minor *minor);
+ void (*proc_cleanup)(struct drm_minor *minor);
+
+ /**
+ * Driver-specific constructor for drm_gem_objects, to set up
+ * obj->driver_private.
+ *
+ * Returns 0 on success.
+ */
+ int (*gem_init_object) (struct drm_gem_object *obj);
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
@@ -787,7 +864,7 @@ struct drm_device {
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
struct drm_open_hash map_hash; /**< User token hash table for maps */
- struct drm_mm offset_manager; /**< User token manager */
+ struct drm_memrange offset_manager; /**< User token manager */
struct drm_open_hash object_hash; /**< User token hash table for objects */
struct address_space *dev_mapping; /**< For unmap_mapping_range() */
struct page *ttm_dummy_page;
@@ -892,6 +969,21 @@ struct drm_device {
spinlock_t drw_lock;
struct idr drw_idr;
/*@} */
+
+ /** \name GEM information */
+ /*@{ */
+ spinlock_t object_name_lock;
+ struct idr object_name_idr;
+ atomic_t object_count;
+ atomic_t object_memory;
+ atomic_t pin_count;
+ atomic_t pin_memory;
+ atomic_t gtt_count;
+ atomic_t gtt_memory;
+ uint32_t gtt_total;
+ uint32_t invalidate_domains; /* domains pending invalidation */
+ uint32_t flush_domains; /* domains pending flush */
+ /*@} */
};
#if __OS_HAS_AGP
@@ -1007,6 +1099,10 @@ extern void drm_free_pages(unsigned long address, int order, int area);
extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
extern void drm_free_memctl(size_t size);
@@ -1200,7 +1296,7 @@ extern void drm_agp_chipset_flush(struct drm_device *dev);
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
extern int drm_put_dev(struct drm_device *dev);
-extern int drm_put_minor(struct drm_minor **minor);
+extern int drm_put_minor(struct drm_device *dev);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern struct class *drm_class;
@@ -1240,26 +1336,95 @@ extern int drm_sysfs_device_add(struct drm_minor *minor);
extern void drm_sysfs_device_remove(struct drm_minor *minor);
/*
- * Basic memory manager support (drm_mm.c)
+ * Basic memory manager support (drm_memrange.c)
*/
-extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
- unsigned alignment);
-extern void drm_mm_put_block(struct drm_mm_node *cur);
-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
- unsigned alignment, int best_match);
-extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
-extern void drm_mm_takedown(struct drm_mm *mm);
-extern int drm_mm_clean(struct drm_mm *mm);
-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
-extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
-
-static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
+ unsigned long size,
+ unsigned alignment);
+extern void drm_memrange_put_block(struct drm_memrange_node *cur);
+extern struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange *mm,
+ unsigned long size,
+ unsigned alignment, int best_match);
+extern int drm_memrange_init(struct drm_memrange *mm,
+ unsigned long start, unsigned long size);
+extern void drm_memrange_takedown(struct drm_memrange *mm);
+extern int drm_memrange_clean(struct drm_memrange *mm);
+extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm);
+extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm,
+ unsigned long size);
+extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm,
+ unsigned long size);
+
+static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
{
return block->mm;
}
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int
+drm_gem_init (struct drm_device *dev);
+
+void
+drm_gem_object_free (struct kref *kref);
+
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size);
+
+void
+drm_gem_object_handle_free (struct kref *kref);
+
+static inline void drm_gem_object_reference(struct drm_gem_object *obj)
+{
+ kref_get(&obj->refcount);
+}
+
+static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
+
+ kref_put (&obj->refcount, drm_gem_object_free);
+}
+
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep);
+
+static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
+{
+ drm_gem_object_reference (obj);
+ kref_get(&obj->handlecount);
+}
+
+static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+ kref_put (&obj->handlecount, drm_gem_object_handle_free);
+ drm_gem_object_unreference (obj);
+}
+
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ int handle);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 80663717..6ccb4b6d 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -484,7 +484,50 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset)
+{
+ DRM_AGP_MEM *mem;
+ int ret, i;
+	DRM_DEBUG("drm_agp_bind_pages\n");
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+ mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
+#else
+ mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+ AGP_USER_MEMORY);
+#endif
+ if (mem == NULL) {
+ DRM_ERROR("Failed to allocate memory for %ld pages\n",
+ num_pages);
+ return NULL;
+ }
+
+ for (i = 0; i < num_pages; i++)
+ mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
+ mem->page_count = num_pages;
+
+ mem->is_flushed = TRUE;
+ ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+ if (ret != 0) {
+ DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+ agp_free_memory(mem);
+ return NULL;
+ }
+
+ return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
/*
* AGP ttm backend interface.
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 88b2ee66..3abbb8c4 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -418,14 +418,14 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
if (!bo->fence) {
list_del_init(&bo->lru);
if (bo->mem.mm_node) {
- drm_mm_put_block(bo->mem.mm_node);
+ drm_memrange_put_block(bo->mem.mm_node);
if (bo->pinned_node == bo->mem.mm_node)
bo->pinned_node = NULL;
bo->mem.mm_node = NULL;
}
list_del_init(&bo->pinned_lru);
if (bo->pinned_node) {
- drm_mm_put_block(bo->pinned_node);
+ drm_memrange_put_block(bo->pinned_node);
bo->pinned_node = NULL;
}
list_del_init(&bo->ddestroy);
@@ -791,7 +791,7 @@ out:
mutex_lock(&dev->struct_mutex);
if (evict_mem.mm_node) {
if (evict_mem.mm_node != bo->pinned_node)
- drm_mm_put_block(evict_mem.mm_node);
+ drm_memrange_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL;
}
drm_bo_add_to_lru(bo);
@@ -810,7 +810,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
struct drm_bo_mem_reg *mem,
uint32_t mem_type, int no_wait)
{
- struct drm_mm_node *node;
+ struct drm_memrange_node *node;
struct drm_buffer_manager *bm = &dev->bm;
struct drm_buffer_object *entry;
struct drm_mem_type_manager *man = &bm->man[mem_type];
@@ -820,7 +820,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
mutex_lock(&dev->struct_mutex);
do {
- node = drm_mm_search_free(&man->manager, num_pages,
+ node = drm_memrange_search_free(&man->manager, num_pages,
mem->page_alignment, 1);
if (node)
break;
@@ -846,7 +846,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
return -ENOMEM;
}
- node = drm_mm_get_block(node, num_pages, mem->page_alignment);
+ node = drm_memrange_get_block(node, num_pages, mem->page_alignment);
if (unlikely(!node)) {
mutex_unlock(&dev->struct_mutex);
return -ENOMEM;
@@ -924,7 +924,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
int type_found = 0;
int type_ok = 0;
int has_eagain = 0;
- struct drm_mm_node *node = NULL;
+ struct drm_memrange_node *node = NULL;
int ret;
mem->mm_node = NULL;
@@ -952,10 +952,10 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
mutex_lock(&dev->struct_mutex);
if (man->has_type && man->use_type) {
type_found = 1;
- node = drm_mm_search_free(&man->manager, mem->num_pages,
+ node = drm_memrange_search_free(&man->manager, mem->num_pages,
mem->page_alignment, 1);
if (node)
- node = drm_mm_get_block(node, mem->num_pages,
+ node = drm_memrange_get_block(node, mem->num_pages,
mem->page_alignment);
}
mutex_unlock(&dev->struct_mutex);
@@ -1340,7 +1340,7 @@ out_unlock:
if (ret || !move_unfenced) {
if (mem.mm_node) {
if (mem.mm_node != bo->pinned_node)
- drm_mm_put_block(mem.mm_node);
+ drm_memrange_put_block(mem.mm_node);
mem.mm_node = NULL;
}
drm_bo_add_to_lru(bo);
@@ -1432,7 +1432,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
if (bo->pinned_node != bo->mem.mm_node) {
if (bo->pinned_node != NULL)
- drm_mm_put_block(bo->pinned_node);
+ drm_memrange_put_block(bo->pinned_node);
bo->pinned_node = bo->mem.mm_node;
}
@@ -1443,7 +1443,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
mutex_lock(&dev->struct_mutex);
if (bo->pinned_node != bo->mem.mm_node)
- drm_mm_put_block(bo->pinned_node);
+ drm_memrange_put_block(bo->pinned_node);
list_del_init(&bo->pinned_lru);
bo->pinned_node = NULL;
@@ -2082,7 +2082,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo,
if (bo->pinned_node == bo->mem.mm_node)
bo->pinned_node = NULL;
if (bo->pinned_node != NULL) {
- drm_mm_put_block(bo->pinned_node);
+ drm_memrange_put_block(bo->pinned_node);
bo->pinned_node = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -2223,8 +2223,8 @@ int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
- if (drm_mm_clean(&man->manager)) {
- drm_mm_takedown(&man->manager);
+ if (drm_memrange_clean(&man->manager)) {
+ drm_memrange_takedown(&man->manager);
} else {
ret = -EBUSY;
}
@@ -2295,7 +2295,7 @@ int drm_bo_init_mm(struct drm_device *dev, unsigned type,
DRM_ERROR("Zero size memory manager type %d\n", type);
return ret;
}
- ret = drm_mm_init(&man->manager, p_offset, p_size);
+ ret = drm_memrange_init(&man->manager, p_offset, p_size);
if (ret)
return ret;
}
@@ -2713,7 +2713,7 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
list->user_token = 0;
}
if (list->file_offset_node) {
- drm_mm_put_block(list->file_offset_node);
+ drm_memrange_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
@@ -2756,7 +2756,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
atomic_inc(&bo->usage);
map->handle = (void *)bo;
- list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
+ list->file_offset_node = drm_memrange_search_free(&dev->offset_manager,
bo->mem.num_pages, 0, 0);
if (unlikely(!list->file_offset_node)) {
@@ -2764,7 +2764,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
return -ENOMEM;
}
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ list->file_offset_node = drm_memrange_get_block(list->file_offset_node,
bo->mem.num_pages, 0);
if (unlikely(!list->file_offset_node)) {
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index bf0e1b74..850be5a3 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -41,7 +41,7 @@ static void drm_bo_free_old_node(struct drm_buffer_object *bo)
if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
mutex_lock(&bo->dev->struct_mutex);
- drm_mm_put_block(old_mem->mm_node);
+ drm_memrange_put_block(old_mem->mm_node);
mutex_unlock(&bo->dev->struct_mutex);
}
old_mem->mm_node = NULL;
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 3339219d..6e5d252b 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -329,7 +329,7 @@ typedef _Bool bool;
#endif
-#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM))
+#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
#define DRM_KMAP_ATOMIC_PROT_PFN
extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
pgprot_t protection);
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 6ac2adf3..c2445671 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -150,6 +150,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -415,10 +419,10 @@ static void drm_cleanup(struct drm_device * dev)
drm_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
- drm_mm_takedown(&dev->offset_manager);
+ drm_memrange_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
- drm_put_minor(&dev->primary);
+ drm_put_minor(dev);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
}
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 3bc25f24..ec521101 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -274,6 +274,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_open(dev, priv);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -444,6 +447,9 @@ int drm_release(struct inode *inode, struct file *filp)
dev->driver->reclaim_buffers(dev, file_priv);
}
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
+
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
new file mode 100644
index 00000000..434155b3
--- /dev/null
+++ b/linux-core/drm_gem.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file. However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ * default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ * handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls. The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+ spin_lock_init(&dev->object_name_lock);
+ idr_init(&dev->object_name_idr);
+ atomic_set(&dev->object_count, 0);
+ atomic_set(&dev->object_memory, 0);
+ atomic_set(&dev->pin_count, 0);
+ atomic_set(&dev->pin_memory, 0);
+ atomic_set(&dev->gtt_count, 0);
+ atomic_set(&dev->gtt_memory, 0);
+ return 0;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_object *obj;
+
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+	if (obj == NULL)
+		return NULL;
+
+ obj->dev = dev;
+ obj->filp = shmem_file_setup("drm mm object", size, 0);
+ if (IS_ERR(obj->filp)) {
+ kfree(obj);
+ return NULL;
+ }
+
+ kref_init(&obj->refcount);
+ kref_init(&obj->handlecount);
+ obj->size = size;
+ if (dev->driver->gem_init_object != NULL &&
+ dev->driver->gem_init_object(obj) != 0) {
+ fput(obj->filp);
+ kfree(obj);
+ return NULL;
+ }
+ atomic_inc(&dev->object_count);
+ atomic_add(obj->size, &dev->object_memory);
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+
+ /* This is gross. The idr system doesn't let us try a delete and
+ * return an error code. It just spews if you fail at deleting.
+ * So, we have to grab a lock around finding the object and then
+ * doing the delete on it and dropping the refcount, or the user
+ * could race us to double-decrement the refcount and cause a
+ * use-after-free later. Given the frequency of our handle lookups,
+ * we may want to use ida for number allocation and a hash table
+ * for the pointers, anyway.
+ */
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return -EINVAL;
+ }
+ dev = obj->dev;
+
+ /* Release reference and decrement refcount. */
+ idr_remove(&filp->object_idr, handle);
+ spin_unlock(&filp->table_lock);
+
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep)
+{
+ int ret;
+
+ /*
+ * Get the user-visible handle using idr.
+ */
+again:
+ /* ensure there is space available to allocate a handle */
+ if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ /* do the allocation under our spinlock */
+ spin_lock(&file_priv->table_lock);
+ ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
+ spin_unlock(&file_priv->table_lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ if (ret != 0)
+ return ret;
+
+ drm_gem_object_handle_reference(obj);
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ int handle)
+{
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return NULL;
+ }
+
+ drm_gem_object_reference(obj);
+
+ spin_unlock(&filp->table_lock);
+
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_close *args = data;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ ret = drm_gem_handle_delete(file_priv, args->handle);
+
+ return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_flink *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+again:
+ if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ spin_lock(&dev->object_name_lock);
+ if (obj->name) {
+ spin_unlock(&dev->object_name_lock);
+ return -EEXIST;
+ }
+ ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+ &obj->name);
+ spin_unlock(&dev->object_name_lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ if (ret != 0) {
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /*
+ * Leave the reference from the lookup around as the
+ * name table now holds one
+ */
+ args->name = (uint64_t) obj->name;
+
+ return 0;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_open *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+ int handle;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ spin_lock(&dev->object_name_lock);
+ obj = idr_find(&dev->object_name_idr, (int) args->name);
+ if (obj)
+ drm_gem_object_reference(obj);
+ spin_unlock(&dev->object_name_lock);
+ if (!obj)
+ return -ENOENT;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ args->size = obj->size;
+
+ return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+ idr_init(&file_private->object_idr);
+ spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+
+ drm_gem_object_handle_unreference(obj);
+
+ return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+ mutex_lock(&dev->struct_mutex);
+ idr_for_each(&file_private->object_idr,
+ &drm_gem_object_release_handle, NULL);
+
+ idr_destroy(&file_private->object_idr);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+ struct drm_device *dev = obj->dev;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (dev->driver->gem_free_object != NULL)
+ dev->driver->gem_free_object(obj);
+
+ fput(obj->filp);
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
+ kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+ struct drm_gem_object *obj = container_of(kref,
+ struct drm_gem_object,
+ handlecount);
+ struct drm_device *dev = obj->dev;
+
+ /* Remove any name for this object */
+ spin_lock(&dev->object_name_lock);
+ if (obj->name) {
+ idr_remove(&dev->object_name_idr, obj->name);
+ spin_unlock(&dev->object_name_lock);
+ /*
+ * The object name held a reference to this object, drop
+ * that now.
+ */
+ drm_gem_object_unreference(obj);
+ } else
+ spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index d0d6f987..5b9f474b 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
- p->irq = dev->irq;
+ p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
@@ -128,6 +128,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
+ init_timer_deferrable(&dev->vblank_disable_timer);
spin_lock_init(&dev->vbl_lock);
atomic_set(&dev->vbl_signal_pending, 0);
dev->num_crtcs = num_crtcs;
@@ -201,7 +202,7 @@ int drm_irq_install(struct drm_device * dev)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if (dev->irq == 0)
+ if (dev->pdev->irq == 0)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@@ -219,7 +220,7 @@ int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("irq=%d\n", dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@@ -228,7 +229,7 @@ int drm_irq_install(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
- ret = request_irq(dev->irq, dev->driver->irq_handler,
+ ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
sh_flags, dev->devname, dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
@@ -236,6 +237,10 @@ int drm_irq_install(struct drm_device * dev)
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ /* Expose the device irq to device drivers that want to export it for
+ * whatever reason.
+ */
+ dev->irq = dev->pdev->irq;
/* After installing handler */
ret = dev->driver->irq_postinstall(dev);
@@ -271,11 +276,11 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("irq=%d\n", dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
dev->driver->irq_uninstall(dev);
- free_irq(dev->irq, dev);
+ free_irq(dev->pdev->irq, dev);
drm_vblank_cleanup(dev);
@@ -309,7 +314,7 @@ int drm_control(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != dev->irq)
+ ctl->irq != dev->pdev->irq)
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
@@ -514,7 +519,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret = 0;
unsigned int flags, seq, crtc;
- if ((!dev->irq) || (!dev->irq_enabled))
+ if ((!dev->pdev->irq) || (!dev->irq_enabled))
return -EINVAL;
if (vblwait->request.type &
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index 573213de..a2966efb 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -218,22 +218,16 @@ int drm_lock_take(struct drm_lock_data *lock_data,
} while (prev != old);
spin_unlock_bh(&lock_data->spinlock);
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
+ /* Warn on recursive locking of user contexts. */
+ if (_DRM_LOCKING_CONTEXT(old) == context && _DRM_LOCK_IS_HELD(old)) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
}
+ return 0;
}
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
-
- return 1;
- }
- return 0;
+ return !_DRM_LOCK_IS_HELD(old);
}
/**
@@ -386,7 +380,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_release);
-
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index e1df3dac..b90fc020 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -310,6 +310,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
+EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -322,6 +323,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
+EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP*/
static void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/linux-core/drm_mm.c b/linux-core/drm_memrange.c
index 59110293..7014c4e2 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_memrange.c
@@ -44,26 +44,26 @@
#include "drmP.h"
#include <linux/slab.h>
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
+unsigned long drm_memrange_tail_space(struct drm_memrange *mm)
{
struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_memrange_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long size)
{
struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_memrange_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
if (!entry->free)
return -ENOMEM;
@@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
}
-static int drm_mm_create_tail_node(struct drm_mm *mm,
+static int drm_memrange_create_tail_node(struct drm_memrange *mm,
unsigned long start,
unsigned long size)
{
- struct drm_mm_node *child;
+ struct drm_memrange_node *child;
- child = (struct drm_mm_node *)
+ child = (struct drm_memrange_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
@@ -98,26 +98,26 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
}
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+int drm_memrange_add_space_to_tail(struct drm_memrange *mm, unsigned long size)
{
struct list_head *tail_node;
- struct drm_mm_node *entry;
+ struct drm_memrange_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+ entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
if (!entry->free) {
- return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+ return drm_memrange_create_tail_node(mm, entry->start + entry->size, size);
}
entry->size += size;
return 0;
}
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+static struct drm_memrange_node *drm_memrange_split_at_start(struct drm_memrange_node *parent,
unsigned long size)
{
- struct drm_mm_node *child;
+ struct drm_memrange_node *child;
- child = (struct drm_mm_node *)
+ child = (struct drm_memrange_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return NULL;
@@ -137,19 +137,19 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
return child;
}
-struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
unsigned long size, unsigned alignment)
{
- struct drm_mm_node *align_splitoff = NULL;
- struct drm_mm_node *child;
+ struct drm_memrange_node *align_splitoff = NULL;
+ struct drm_memrange_node *child;
unsigned tmp = 0;
if (alignment)
tmp = parent->start % alignment;
if (tmp) {
- align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+ align_splitoff = drm_memrange_split_at_start(parent, alignment - tmp);
if (!align_splitoff)
return NULL;
}
@@ -159,40 +159,41 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
parent->free = 0;
return parent;
} else {
- child = drm_mm_split_at_start(parent, size);
+ child = drm_memrange_split_at_start(parent, size);
}
if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ drm_memrange_put_block(align_splitoff);
return child;
}
+EXPORT_SYMBOL(drm_memrange_get_block);
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
*/
-void drm_mm_put_block(struct drm_mm_node * cur)
+void drm_memrange_put_block(struct drm_memrange_node * cur)
{
- struct drm_mm *mm = cur->mm;
+ struct drm_memrange *mm = cur->mm;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &mm->ml_entry;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
+ struct drm_memrange_node *prev_node = NULL;
+ struct drm_memrange_node *next_node;
int merged = 0;
if (cur_head->prev != root_head) {
- prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+ prev_node = list_entry(cur_head->prev, struct drm_memrange_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
- next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+ next_node = list_entry(cur_head->next, struct drm_memrange_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
@@ -215,16 +216,16 @@ void drm_mm_put_block(struct drm_mm_node * cur)
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
-EXPORT_SYMBOL(drm_mm_put_block);
+EXPORT_SYMBOL(drm_memrange_put_block);
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
- struct drm_mm_node *entry;
- struct drm_mm_node *best;
+ struct drm_memrange_node *entry;
+ struct drm_memrange_node *best;
unsigned long best_size;
unsigned wasted;
@@ -232,7 +233,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
best_size = ~0UL;
list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
+ entry = list_entry(list, struct drm_memrange_node, fl_entry);
wasted = 0;
if (entry->size < size)
@@ -257,30 +258,31 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
return best;
}
+EXPORT_SYMBOL(drm_memrange_search_free);
-int drm_mm_clean(struct drm_mm * mm)
+int drm_memrange_clean(struct drm_memrange * mm)
{
struct list_head *head = &mm->ml_entry;
return (head->next->next == head);
}
-int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+int drm_memrange_init(struct drm_memrange * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
- return drm_mm_create_tail_node(mm, start, size);
+ return drm_memrange_create_tail_node(mm, start, size);
}
-EXPORT_SYMBOL(drm_mm_init);
+EXPORT_SYMBOL(drm_memrange_init);
-void drm_mm_takedown(struct drm_mm * mm)
+void drm_memrange_takedown(struct drm_memrange * mm)
{
struct list_head *bnode = mm->fl_entry.next;
- struct drm_mm_node *entry;
+ struct drm_memrange_node *entry;
- entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+ entry = list_entry(bnode, struct drm_memrange_node, fl_entry);
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
@@ -293,4 +295,4 @@ void drm_mm_takedown(struct drm_mm * mm)
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
}
-EXPORT_SYMBOL(drm_mm_takedown);
+EXPORT_SYMBOL(drm_memrange_takedown);
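+
+/*
+ * For illustration, a minimal (hypothetical) use of this allocator with
+ * only the functions defined above:
+ *
+ *	struct drm_memrange mm;
+ *	struct drm_memrange_node *node;
+ *
+ *	drm_memrange_init(&mm, 0, 16 * 1024 * 1024);
+ *	node = drm_memrange_search_free(&mm, 8192, PAGE_SIZE, 1);
+ *	if (node)
+ *		node = drm_memrange_get_block(node, 8192, PAGE_SIZE);
+ *	... use node->start as the offset within the managed range ...
+ *	drm_memrange_put_block(node);
+ *	drm_memrange_takedown(&mm);
+ */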
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 770fbc56..6ec09ef8 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -301,7 +301,12 @@ struct drm_ttm_backend_func {
void (*destroy) (struct drm_ttm_backend *backend);
};
-
+/**
+ * This structure associates a set of flags and methods with a drm_ttm
+ * object, and will also be subclassed by the particular backend.
+ *
+ * \sa #drm_agp_ttm_backend
+ */
struct drm_ttm_backend {
struct drm_device *dev;
uint32_t flags;
@@ -413,7 +418,7 @@ extern int drm_ttm_destroy(struct drm_ttm *ttm);
*/
struct drm_bo_mem_reg {
- struct drm_mm_node *mm_node;
+ struct drm_memrange_node *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
@@ -494,7 +499,7 @@ struct drm_buffer_object {
unsigned long num_pages;
/* For pinned buffers */
- struct drm_mm_node *pinned_node;
+ struct drm_memrange_node *pinned_node;
uint32_t pinned_mem_type;
struct list_head pinned_lru;
@@ -529,7 +534,7 @@ struct drm_mem_type_manager {
int has_type;
int use_type;
int kern_init_type;
- struct drm_mm manager;
+ struct drm_memrange manager;
struct list_head lru;
struct list_head pinned;
uint32_t flags;
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 42da5c69..2bbe7eea 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -51,6 +51,10 @@ static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_objects_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
@@ -70,6 +74,8 @@ static struct drm_proc_list {
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
{"objects", drm_objects_info},
+ {"gem_names", drm_gem_name_info},
+ {"gem_objects", drm_gem_object_info},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info},
#endif
@@ -582,6 +588,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
return ret;
}
+struct drm_gem_name_info_data {
+ int len;
+ char *buf;
+ int eof;
+};
+
+static int drm_gem_one_name_info (int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+ struct drm_gem_name_info_data *nid = data;
+
+ DRM_INFO ("name %d size %d\n", obj->name, obj->size);
+ if (nid->eof)
+ return 0;
+
+ nid->len += sprintf (&nid->buf[nid->len],
+ "%6d%9d%8d%9d\n",
+ obj->name, obj->size,
+ atomic_read(&obj->handlecount.refcount),
+ atomic_read(&obj->refcount.refcount));
+ if (nid->len > DRM_PROC_LIMIT) {
+ nid->eof = 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ struct drm_gem_name_info_data nid;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ nid.len = sprintf (buf, " name size handles refcount\n");
+ nid.buf = buf;
+ nid.eof = 0;
+ idr_for_each (&dev->object_name_idr, drm_gem_one_name_info, &nid);
+
+ *start = &buf[offset];
+ *eof = 0;
+ if (nid.len > request + offset)
+ return request;
+ *eof = 1;
+ return nid.len - offset;
+}
+
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("%d objects\n", atomic_read (&dev->object_count));
+ DRM_PROC_PRINT("%d object bytes\n", atomic_read (&dev->object_memory));
+ DRM_PROC_PRINT("%d pinned\n", atomic_read (&dev->pin_count));
+ DRM_PROC_PRINT("%d pin bytes\n", atomic_read (&dev->pin_memory));
+ DRM_PROC_PRINT("%d gtt bytes\n", atomic_read (&dev->gtt_memory));
+ DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
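+
+/*
+ * For illustration, with minor number 0 these counters show up as
+ * /proc/dri/0/gem_objects, and the name table as /proc/dri/0/gem_names
+ * (paths assumed from the proc entries registered above).
+ */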
+
#if DRM_DEBUG_CODE
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c
index 8421a939..7c16f685 100644
--- a/linux-core/drm_sman.c
+++ b/linux-core/drm_sman.c
@@ -88,34 +88,34 @@ EXPORT_SYMBOL(drm_sman_init);
static void *drm_sman_mm_allocate(void *private, unsigned long size,
unsigned alignment)
{
- struct drm_mm *mm = (struct drm_mm *) private;
- struct drm_mm_node *tmp;
+ struct drm_memrange *mm = (struct drm_memrange *) private;
+ struct drm_memrange_node *tmp;
- tmp = drm_mm_search_free(mm, size, alignment, 1);
+ tmp = drm_memrange_search_free(mm, size, alignment, 1);
if (!tmp) {
return NULL;
}
- tmp = drm_mm_get_block(tmp, size, alignment);
+ tmp = drm_memrange_get_block(tmp, size, alignment);
return tmp;
}
static void drm_sman_mm_free(void *private, void *ref)
{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
+ struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
- drm_mm_put_block(node);
+ drm_memrange_put_block(node);
}
static void drm_sman_mm_destroy(void *private)
{
- struct drm_mm *mm = (struct drm_mm *) private;
- drm_mm_takedown(mm);
+ struct drm_memrange *mm = (struct drm_memrange *) private;
+ drm_memrange_takedown(mm);
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
}
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
+ struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
return node->start;
}
@@ -124,7 +124,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size)
{
struct drm_sman_mm *sman_mm;
- struct drm_mm *mm;
+ struct drm_memrange *mm;
int ret;
BUG_ON(manager >= sman->num_managers);
@@ -135,7 +135,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
return -ENOMEM;
}
sman_mm->private = mm;
- ret = drm_mm_init(mm, start, size);
+ ret = drm_memrange_init(mm, start, size);
if (ret) {
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h
index 39a39fef..0299776c 100644
--- a/linux-core/drm_sman.h
+++ b/linux-core/drm_sman.h
@@ -45,7 +45,7 @@
/*
* A class that is an abstraction of a simple memory allocator.
* The sman implementation provides a default such allocator
- * using the drm_mm.c implementation. But the user can replace it.
+ * using the drm_memrange.c implementation. But the user can replace it.
* See the SiS implementation, which may use the SiS FB kernel module
* for memory management.
*/
@@ -116,7 +116,7 @@ extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order);
/*
- * Initialize a drm_mm.c allocator. Should be called only once for each
+ * Initialize a drm_memrange.c allocator. Should be called only once for each
* manager unless a customized allocator is used.
*/
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index c68adbaf..1aacd4ff 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -115,15 +115,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
return -ENOMEM;
}
- if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE)) {
+ if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
drm_ht_remove(&dev->map_hash);
- drm_mm_takedown(&dev->offset_manager);
+ drm_memrange_takedown(&dev->offset_manager);
return -ENOMEM;
}
@@ -163,7 +163,16 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
+ if (driver->driver_features & DRIVER_GEM) {
+ retcode = drm_gem_init (dev);
+ if (retcode) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto error_out_unreg;
+ }
+ }
+
drm_fence_manager_init(dev);
+
return 0;
error_out_unreg:
@@ -213,6 +222,13 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
goto err_mem;
}
+ if (dev->driver->proc_init) {
+ ret = dev->driver->proc_init(new_minor);
+ if (ret) {
+ DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n");
+ goto err_mem;
+ }
+ }
} else
new_minor->dev_root = NULL;
@@ -229,8 +245,11 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
err_g2:
- if (new_minor->type == DRM_MINOR_LEGACY)
+ if (new_minor->type == DRM_MINOR_LEGACY) {
+ if (dev->driver->proc_cleanup)
+ dev->driver->proc_cleanup(new_minor);
drm_proc_cleanup(new_minor, drm_proc_root);
+ }
err_mem:
kfree(new_minor);
err_idr:
@@ -293,7 +312,7 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
return 0;
err_g4:
- drm_put_minor(&dev->primary);
+ drm_put_minor(dev);
err_g3:
if (!drm_fb_loaded)
pci_disable_device(pdev);
@@ -349,13 +368,17 @@ int drm_put_dev(struct drm_device * dev)
* last minor released.
*
*/
-int drm_put_minor(struct drm_minor **minor_p)
+int drm_put_minor(struct drm_device *dev)
{
+ struct drm_minor **minor_p = &dev->primary;
struct drm_minor *minor = *minor_p;
DRM_DEBUG("release secondary minor %d\n", minor->index);
- if (minor->type == DRM_MINOR_LEGACY)
+ if (minor->type == DRM_MINOR_LEGACY) {
+ if (dev->driver->proc_cleanup)
+ dev->driver->proc_cleanup(minor);
drm_proc_cleanup(minor, drm_proc_root);
+ }
drm_sysfs_device_remove(minor);
idr_remove(&drm_minors_idr, minor->index);
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index abb45de7..e0eb6335 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -107,18 +107,22 @@ static int i915_resume(struct drm_device *dev)
}
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void remove(struct pci_dev *pdev);
+
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
* deal with them for intel hardware.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
.load = i915_driver_load,
.unload = i915_driver_unload,
.firstopen = i915_driver_firstopen,
+ .open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
+ .postclose = i915_driver_postclose,
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
@@ -132,7 +136,11 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
+ .proc_init = i915_gem_proc_init,
+ .proc_cleanup = i915_gem_proc_cleanup,
.ioctls = i915_ioctls,
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -149,7 +157,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
- .remove = __devexit_p(drm_cleanup_pci),
+ .remove = remove,
},
#ifdef I915_HAVE_FENCE
.fence_driver = &i915_fence_driver,
@@ -167,7 +175,28 @@ static struct drm_driver driver = {
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ int ret;
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+ * integrated graphics even though the support isn't actually there
+ * according to the published specs. It doesn't appear to function
+ * correctly in testing on 945G.
+ * This may be a side effect of MSI having been made available for PEG
+ * and the registers being closely associated.
+ */
+ if (pdev->device != 0x2772 && pdev->device != 0x27A2)
+ (void)pci_enable_msi(pdev);
+
+ ret = drm_get_dev(pdev, ent, &driver);
+ if (ret && pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ return ret;
+}
+static void remove(struct pci_dev *pdev)
+{
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ drm_cleanup_pci(pdev);
}
static int __init i915_init(void)
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
new file mode 100644
index 00000000..4c167d29
--- /dev/null
+++ b/linux-core/i915_gem.c
@@ -0,0 +1,2501 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_compat.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include <linux/swap.h>
+
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
+static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_init *args = data;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
+ (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
+ args->gtt_end - args->gtt_start);
+
+ dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
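+
+/*
+ * For illustration, userspace hands the driver a page-aligned slice of
+ * the GTT to manage (values hypothetical; ioctl name assumed from
+ * i915_drm.h):
+ *
+ *	struct drm_i915_gem_init init = {
+ *		.gtt_start = 1 * 1024 * 1024,
+ *		.gtt_end = 32 * 1024 * 1024,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_INIT, &init);
+ *
+ * A range that is not page-aligned, or with gtt_start >= gtt_end, is
+ * rejected with -EINVAL above.
+ */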
+
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_create *args = data;
+ struct drm_gem_object *obj;
+ int handle, ret;
+
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ /* Allocate the new object */
+ obj = drm_gem_object_alloc(dev, args->size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pread *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ ssize_t read;
+ loff_t offset;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+ obj_priv = obj->driver_private;
+
+ /* Bounds check source.
+ *
+ * XXX: This could use review for overflow issues...
+ */
+ if (args->offset > obj->size || args->size > obj->size ||
+ args->offset + args->size > obj->size) {
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
+ I915_GEM_DOMAIN_CPU, 0);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ offset = args->offset;
+
+ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+ args->size, &offset);
+ if (read != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (read < 0)
+ return read;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+#include "drm_compat.h"
+
+static int
+i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ ssize_t remain;
+ loff_t offset;
+ char __user *user_data;
+ char *vaddr;
+ int i, o, l;
+ int ret = 0;
+ unsigned long pfn;
+ unsigned long unwritten;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ remain = args->size;
+ if (!access_ok(VERIFY_READ, user_data, remain))
+ return -EFAULT;
+
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_object_pin(obj, 0);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ if (ret)
+ goto fail;
+
+ obj_priv = obj->driver_private;
+ offset = obj_priv->gtt_offset + args->offset;
+ obj_priv->dirty = 1;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * i = page number
+ * o = offset within page
+ * l = bytes to copy
+ */
+ i = offset >> PAGE_SHIFT;
+ o = offset & (PAGE_SIZE-1);
+ l = remain;
+ if ((o + l) > PAGE_SIZE)
+ l = PAGE_SIZE - o;
+
+ pfn = (dev->agp->base >> PAGE_SHIFT) + i;
+
+#ifdef DRM_KMAP_ATOMIC_PROT_PFN
+ /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
+ */
+ vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
+ __pgprot(__PAGE_KERNEL));
+#if WATCH_PWRITE
+ DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
+ i, o, l, pfn, vaddr);
+#endif
+ unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
+ user_data, l);
+ kunmap_atomic(vaddr, KM_USER0);
+
+ if (unwritten)
+#endif
+ {
+ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+#if WATCH_PWRITE
+ DRM_INFO("pwrite slow i %d o %d l %d "
+ "pfn %ld vaddr %p\n",
+ i, o, l, pfn, vaddr);
+#endif
+ if (vaddr == NULL) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ unwritten = __copy_from_user(vaddr + o, user_data, l);
+#if WATCH_PWRITE
+ DRM_INFO("unwritten %ld\n", unwritten);
+#endif
+ iounmap(vaddr);
+ if (unwritten) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ }
+
+ remain -= l;
+ user_data += l;
+ offset += l;
+ }
+#if WATCH_PWRITE && 1
+ i915_gem_clflush_object(obj);
+ i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
+ i915_gem_clflush_object(obj);
+#endif
+
+fail:
+ i915_gem_object_unpin(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ int ret;
+ loff_t offset;
+ ssize_t written;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ offset = args->offset;
+
+ written = vfs_write(obj->filp,
+ (char __user *)(uintptr_t) args->data_ptr,
+ args->size, &offset);
+ if (written != args->size) {
+ mutex_unlock(&dev->struct_mutex);
+ if (written < 0)
+ return written;
+ else
+ return -EINVAL;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+ obj_priv = obj->driver_private;
+
+ /* Bounds check destination.
+ *
+ * XXX: This could use review for overflow issues...
+ */
+ if (args->offset > obj->size || args->size > obj->size ||
+ args->offset + args->size > obj->size) {
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+
+ /* We can only do the GTT pwrite on untiled buffers, as otherwise
+ * it would end up going through the fenced access, and we'll get
+ * different detiling behavior between reading and writing.
+ * pread/pwrite currently are reading and writing from the CPU
+ * perspective, requiring manual detiling by the client.
+ */
+ if (obj_priv->tiling_mode == I915_TILING_NONE &&
+ dev->gtt_total != 0)
+ ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
+ else
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+
+#if WATCH_PWRITE
+ if (ret)
+ DRM_INFO("pwrite failed %d\n", ret);
+#endif
+
+ drm_gem_object_unreference(obj);
+
+ return ret;
+}
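+
+/*
+ * For illustration, the expected userspace round trip through the
+ * create/pwrite/pread ioctls (structs assumed from i915_drm.h; error
+ * handling omitted):
+ *
+ *	struct drm_i915_gem_create create = { .size = 4096 };
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ *
+ *	struct drm_i915_gem_pwrite pwrite = {
+ *		.handle = create.handle, .offset = 0, .size = 4096,
+ *		.data_ptr = (uint64_t)(uintptr_t)src,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+ *
+ *	struct drm_i915_gem_pread pread = {
+ *		.handle = create.handle, .offset = 0, .size = 4096,
+ *		.data_ptr = (uint64_t)(uintptr_t)dst,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+ */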
+
+/**
+ * Called when user space prepares to use an object
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_set_domain *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_set_domain(obj, file_priv,
+ args->read_domains, args->write_domain);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Called when user space has done writes to this buffer
+ */
+int
+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_sw_finish *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EBADF;
+ }
+
+#if WATCH_BUF
+ DRM_INFO("%s: sw_finish %d (%p)\n",
+ __func__, args->handle, obj);
+#endif
+ obj_priv = obj->driver_private;
+
+ /* Pinned buffers may be scanout, so flush the cache */
+ if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ }
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_mmap *args = data;
+ struct drm_gem_object *obj;
+ loff_t offset;
+ unsigned long addr;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+
+ offset = args->offset;
+
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap(obj->filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+ up_write(&current->mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR((void *)addr))
+ return addr;
+
+ args->addr_ptr = (uint64_t) addr;
+
+ return 0;
+}
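+
+/*
+ * For illustration, mapping an object from userspace (struct assumed
+ * from i915_drm.h; error handling omitted):
+ *
+ *	struct drm_i915_gem_mmap mmap_arg = {
+ *		.handle = handle, .offset = 0, .size = size,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ *	ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
+ */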
+
+static void
+i915_gem_object_free_page_list(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count = obj->size / PAGE_SIZE;
+ int i;
+
+ if (obj_priv->page_list == NULL)
+ return;
+
+
+ for (i = 0; i < page_count; i++)
+ if (obj_priv->page_list[i] != NULL) {
+ if (obj_priv->dirty)
+ set_page_dirty(obj_priv->page_list[i]);
+ mark_page_accessed(obj_priv->page_list[i]);
+ page_cache_release(obj_priv->page_list[i]);
+ }
+ obj_priv->dirty = 0;
+
+ drm_free(obj_priv->page_list,
+ page_count * sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ obj_priv->page_list = NULL;
+}
+
+static void
+i915_gem_object_move_to_active(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* Add a reference if we're newly entering the active list. */
+ if (!obj_priv->active) {
+ drm_gem_object_reference(obj);
+ obj_priv->active = 1;
+ }
+ /* Move from whatever list we were on to the tail of execution. */
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.active_list);
+}
+
+
+static void
+i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+ if (obj_priv->pin_count != 0)
+ list_del_init(&obj_priv->list);
+ else
+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+ if (obj_priv->active) {
+ obj_priv->active = 0;
+ drm_gem_object_unreference(obj);
+ }
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static uint32_t
+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ uint32_t seqno;
+ int was_empty;
+ RING_LOCALS;
+
+ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+ if (request == NULL)
+ return 0;
+
+ /* Grab the seqno we're going to make this request be, and bump the
+ * next (skipping 0 so it can be the reserved no-seqno value).
+ */
+ seqno = dev_priv->mm.next_gem_seqno;
+ dev_priv->mm.next_gem_seqno++;
+ if (dev_priv->mm.next_gem_seqno == 0)
+ dev_priv->mm.next_gem_seqno++;
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+
+ DRM_DEBUG("%d\n", seqno);
+
+ request->seqno = seqno;
+ request->emitted_jiffies = jiffies;
+ request->flush_domains = flush_domains;
+ was_empty = list_empty(&dev_priv->mm.request_list);
+ list_add_tail(&request->list, &dev_priv->mm.request_list);
+
+ if (was_empty)
+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+ return seqno;
+}
+
+/**
+ * Command execution barrier
+ *
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+uint32_t
+i915_retire_commands(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ uint32_t flush_domains = 0;
+ RING_LOCALS;
+
+ /* The sampler always gets flushed on i965 (sigh) */
+ if (IS_I965G(dev))
+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ return flush_domains;
+}
+
+/**
+ * Moves buffers associated only with the given active seqno from the active
+ * to inactive list, potentially freeing them.
+ */
+static void
+i915_gem_retire_request(struct drm_device *dev,
+ struct drm_i915_gem_request *request)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (request->flush_domains != 0) {
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ /* First clear any buffers that were only waiting for a flush
+ * matching the one just retired.
+ */
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.flushing_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if (obj->write_domain & request->flush_domains) {
+ obj->write_domain = 0;
+ i915_gem_object_move_to_inactive(obj);
+ }
+ }
+
+ }
+
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ /* If the seqno being retired doesn't match the oldest in the
+ * list, then the oldest in the list must still be newer than
+ * this seqno.
+ */
+ if (obj_priv->last_rendering_seqno != request->seqno)
+ return;
+#if WATCH_LRU
+ DRM_INFO("%s: retire %d moves to inactive list %p\n",
+ __func__, request->seqno, obj);
+#endif
+
+ if (obj->write_domain != 0) {
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.flushing_list);
+ } else {
+ i915_gem_object_move_to_inactive(obj);
+ }
+ }
+}
+
+/**
+ * Returns true if seq1 is later than or equal to seq2.
+ */
+static int
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
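+
+/*
+ * The unsigned subtraction above makes the comparison robust against
+ * seqno wraparound: with seq1 = 0x00000001 and seq2 = 0xfffffffe, for
+ * example, seq1 - seq2 is 3 and (int32_t)3 >= 0, so seq1 is correctly
+ * treated as later even though it is numerically smaller.
+ */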
+
+uint32_t
+i915_get_gem_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno;
+
+ seqno = i915_get_gem_seqno(dev);
+
+ while (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+ uint32_t retiring_seqno;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+ retiring_seqno = request->seqno;
+
+ if (i915_seqno_passed(seqno, retiring_seqno) ||
+ dev_priv->mm.wedged) {
+ i915_gem_retire_request(dev, request);
+
+ list_del(&request->list);
+ drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+ } else
+ break;
+ }
+}
+
+void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_device *dev;
+
+ dev_priv = container_of(work, drm_i915_private_t,
+ mm.retire_work.work);
+ dev = dev_priv->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_retire_requests(dev);
+ if (!list_empty(&dev_priv->mm.request_list))
+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ BUG_ON(seqno == 0);
+
+ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ i915_user_irq_on(dev_priv);
+ ret = wait_event_interruptible(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev),
+ seqno) ||
+ dev_priv->mm.wedged);
+ i915_user_irq_off(dev_priv);
+ dev_priv->mm.waiting_gem_seqno = 0;
+ }
+ if (dev_priv->mm.wedged)
+ ret = -EIO;
+
+ if (ret)
+ DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+ __func__, ret, seqno, i915_get_gem_seqno(dev));
+
+ /* Directly dispatch request retiring. While we have the work queue
+ * to handle this, the waiter on a request often wants an associated
+ * buffer to have made it to the inactive list, and we would need
+ * a separate wait queue to handle that.
+ */
+ if (ret == 0)
+ i915_gem_retire_requests(dev);
+
+ return ret;
+}
+
+static void
+i915_gem_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t cmd;
+ RING_LOCALS;
+
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+ invalidate_domains, flush_domains);
+#endif
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+
+ if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) {
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (!IS_I965G(dev)) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+ */
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ }
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static int
+i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ /* If there are writes queued to the buffer, flush and
+ * create a new seqno to wait for.
+ */
+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
+ uint32_t write_domain = obj->write_domain;
+#if WATCH_BUF
+ DRM_INFO("%s: flushing object %p from write domain %08x\n",
+ __func__, obj, write_domain);
+#endif
+ i915_gem_flush(dev, 0, write_domain);
+ obj->write_domain = 0;
+
+ i915_gem_object_move_to_active(obj);
+ obj_priv->last_rendering_seqno = i915_add_request(dev,
+ write_domain);
+ BUG_ON(obj_priv->last_rendering_seqno == 0);
+#if WATCH_LRU
+ DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
+#endif
+ }
+ /* If there is rendering queued on the buffer being evicted, wait for
+ * it.
+ */
+ if (obj_priv->active) {
+#if WATCH_BUF
+ DRM_INFO("%s: object %p wait for seqno %08x\n",
+ __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+ ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+static int
+i915_gem_object_unbind(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret = 0;
+
+#if WATCH_BUF
+ DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
+ DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
+#endif
+ if (obj_priv->gtt_space == NULL)
+ return 0;
+
+ if (obj_priv->pin_count != 0) {
+ DRM_ERROR("Attempting to unbind pinned buffer\n");
+ return -EINVAL;
+ }
+
+ /* Wait for any rendering to complete
+ */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret) {
+ DRM_ERROR("wait_rendering failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Move the object to the CPU domain to ensure that
+ * any possible CPU writes while it's not in the GTT
+ * are flushed when we go to remap it. This will
+ * also ensure that all pending GPU writes are finished
+ * before we unbind.
+ */
+ ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
+ I915_GEM_DOMAIN_CPU);
+ if (ret) {
+ DRM_ERROR("set_domain failed: %d\n", ret);
+ return ret;
+ }
+
+ if (obj_priv->agp_mem != NULL) {
+ drm_unbind_agp(obj_priv->agp_mem);
+ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+ obj_priv->agp_mem = NULL;
+ }
+
+ BUG_ON(obj_priv->active);
+
+ i915_gem_object_free_page_list(obj);
+
+ if (obj_priv->gtt_space) {
+ atomic_dec(&dev->gtt_count);
+ atomic_sub(obj->size, &dev->gtt_memory);
+
+ drm_memrange_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ }
+
+ /* Remove ourselves from the LRU list if present. */
+ if (!list_empty(&obj_priv->list))
+ list_del_init(&obj_priv->list);
+
+ return 0;
+}
+
+static int
+i915_gem_evict_something(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ for (;;) {
+ /* If there's an inactive buffer available now, grab it
+ * and be done.
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+ BUG_ON(obj_priv->pin_count != 0);
+#if WATCH_LRU
+ DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+ BUG_ON(obj_priv->active);
+
+ /* Wait on the rendering and unbind the buffer. */
+ ret = i915_gem_object_unbind(obj);
+ break;
+ }
+
+ /* If we didn't get anything, but the ring is still processing
+ * things, wait for one of those things to finish and hopefully
+ * leave us a buffer to evict.
+ */
+ if (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev, request->seqno);
+ if (ret)
+ break;
+
+ /* If waiting caused an object to become inactive,
+ * then loop around and grab it. Otherwise, we
+ * assume that waiting freed and unbound something,
+ * so there should now be some space in the GTT.
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list))
+ continue;
+ break;
+ }
+
+ /* If we didn't have anything on the request list but there
+ * are buffers awaiting a flush, emit one and try again.
+ * When we wait on it, those buffers waiting for that flush
+ * will get moved to inactive.
+ */
+ if (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ i915_gem_flush(dev,
+ obj->write_domain,
+ obj->write_domain);
+ i915_add_request(dev, obj->write_domain);
+
+ obj = NULL;
+ continue;
+ }
+
+ DRM_ERROR("inactive empty %d request empty %d "
+ "flushing empty %d\n",
+ list_empty(&dev_priv->mm.inactive_list),
+ list_empty(&dev_priv->mm.request_list),
+ list_empty(&dev_priv->mm.flushing_list));
+ /* If we didn't do any of the above, there's nothing to be done
+ * and we just can't fit it in.
+ */
+ return -ENOMEM;
+ }
+ return ret;
+}
+
+static int
+i915_gem_object_get_page_list(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count, i;
+ struct address_space *mapping;
+ struct inode *inode;
+ struct page *page;
+ int ret;
+
+ if (obj_priv->page_list)
+ return 0;
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ */
+ page_count = obj->size / PAGE_SIZE;
+ BUG_ON(obj_priv->page_list != NULL);
+ obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ if (obj_priv->page_list == NULL) {
+ DRM_ERROR("Faled to allocate page list\n");
+ return -ENOMEM;
+ }
+
+ inode = obj->filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+ for (i = 0; i < page_count; i++) {
+ page = find_get_page(mapping, i);
+ if (page == NULL || !PageUptodate(page)) {
+ if (page) {
+ page_cache_release(page);
+ page = NULL;
+ }
+ ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL);
+
+ if (ret) {
+ DRM_ERROR("shmem_getpage failed: %d\n", ret);
+ i915_gem_object_free_page_list(obj);
+ return ret;
+ }
+ unlock_page(page);
+ }
+ obj_priv->page_list[i] = page;
+ }
+ return 0;
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_memrange_node *free_space;
+ int page_count, ret;
+
+ if (alignment == 0)
+ alignment = PAGE_SIZE;
+ if (alignment & (PAGE_SIZE - 1)) {
+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+ return -EINVAL;
+ }
+
+ search_free:
+ free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
+ obj->size,
+ alignment, 0);
+ if (free_space != NULL) {
+ obj_priv->gtt_space =
+ drm_memrange_get_block(free_space, obj->size,
+ alignment);
+ if (obj_priv->gtt_space != NULL) {
+ obj_priv->gtt_space->private = obj;
+ obj_priv->gtt_offset = obj_priv->gtt_space->start;
+ }
+ }
+ if (obj_priv->gtt_space == NULL) {
+ /* If the gtt is empty and we're still having trouble
+ * fitting our object in, we're out of memory.
+ */
+#if WATCH_LRU
+ DRM_INFO("%s: GTT full, evicting something\n", __func__);
+#endif
+ if (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list)) {
+ DRM_ERROR("GTT full, but LRU list empty\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0) {
+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ return ret;
+ }
+ goto search_free;
+ }
+
+#if WATCH_BUF
+ DRM_INFO("Binding object of size %d at 0x%08x\n",
+ obj->size, obj_priv->gtt_offset);
+#endif
+ ret = i915_gem_object_get_page_list(obj);
+ if (ret) {
+ drm_memrange_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ return ret;
+ }
+
+ page_count = obj->size / PAGE_SIZE;
+ /* Create an AGP memory structure pointing at our pages, and bind it
+ * into the GTT.
+ */
+ obj_priv->agp_mem = drm_agp_bind_pages(dev,
+ obj_priv->page_list,
+ page_count,
+ obj_priv->gtt_offset);
+ if (obj_priv->agp_mem == NULL) {
+ i915_gem_object_free_page_list(obj);
+ drm_memrange_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ return -ENOMEM;
+ }
+ atomic_inc(&dev->gtt_count);
+ atomic_add(obj->size, &dev->gtt_memory);
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+ BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+
+ return 0;
+}
+
+void
+i915_gem_clflush_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* If we don't have a page list set up, then we're not pinned
+ * to GPU, and we can ignore the cache flush because it'll happen
+ * again at bind time.
+ */
+ if (obj_priv->page_list == NULL)
+ return;
+
+ drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
+}
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped to GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ *	6. Read/written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
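+
+/* Illustrative sketch (not part of the original patch) of the mask
+ * arithmetic below, for case 3 step 3: the object starts as (CPU, CPU)
+ * and execbuffer requests (RENDER, 0).  Since write_domain == 0, the old
+ * CPU read domain is folded back in, and then:
+ *
+ *	flush_domains      |= obj->write_domain;                  -> CPU
+ *	invalidate_domains |= read_domains & ~obj->read_domains;  -> RENDER
+ *
+ * which produces exactly the clflush + MI_FLUSH sequence described above.
+ */
+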
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t invalidate_domains = 0;
+ uint32_t flush_domains = 0;
+ int ret;
+
+#if WATCH_BUF
+ DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+ __func__, obj,
+ obj->read_domains, read_domains,
+ obj->write_domain, write_domain);
+#endif
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (write_domain == 0)
+ read_domains |= obj->read_domains;
+ else
+ obj_priv->dirty = 1;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->write_domain && obj->write_domain != read_domains) {
+ flush_domains |= obj->write_domain;
+ invalidate_domains |= read_domains & ~obj->write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= read_domains & ~obj->read_domains;
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
+#if WATCH_BUF
+ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+ __func__, flush_domains, invalidate_domains);
+#endif
+ /*
+		 * If we're invalidating the CPU cache and flushing a GPU cache,
+		 * then pause for rendering so that the GPU caches will be
+		 * flushed before the CPU cache is invalidated.
+ */
+ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
+ (flush_domains & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT))) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
+ i915_gem_clflush_object(obj);
+ }
+
+ if ((write_domain | flush_domains) != 0)
+ obj->write_domain = write_domain;
+
+ /* If we're invalidating the CPU domain, clear the per-page CPU
+ * domain list as well.
+ */
+ if (obj_priv->page_cpu_valid != NULL &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
+ ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
+ memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+ }
+ obj->read_domains = read_domains;
+
+ dev->invalidate_domains |= invalidate_domains;
+ dev->flush_domains |= flush_domains;
+#if WATCH_BUF
+ DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
+ __func__,
+ obj->read_domains, obj->write_domain,
+ dev->invalidate_domains, dev->flush_domains);
+#endif
+ return 0;
+}
+
+/**
+ * Set the read/write domain on a range of the object.
+ *
+ * Currently only implemented for CPU reads, otherwise drops to normal
+ * i915_gem_object_set_domain().
+ */
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret, i;
+
+ if (obj->read_domains & I915_GEM_DOMAIN_CPU)
+ return 0;
+
+ if (read_domains != I915_GEM_DOMAIN_CPU ||
+ write_domain != 0)
+ return i915_gem_object_set_domain(obj,
+ read_domains, write_domain);
+
+ /* Wait on any GPU rendering to the object to be flushed. */
+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
+
+	if (obj_priv->page_cpu_valid == NULL) {
+		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
+						      DRM_MEM_DRIVER);
+		if (obj_priv->page_cpu_valid == NULL)
+			return -ENOMEM;
+	}
+
+ /* Flush the cache on any pages that are still invalid from the CPU's
+ * perspective.
+ */
+	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+ if (obj_priv->page_cpu_valid[i])
+ continue;
+
+ drm_ttm_cache_flush(obj_priv->page_list + i, 1);
+
+ obj_priv->page_cpu_valid[i] = 1;
+ }
+
+ return 0;
+}
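+
+/* Illustrative note (a hypothetical caller, not confirmed by this hunk):
+ * the expected user is the pread path, along the lines of
+ *
+ *	i915_gem_object_set_domain_range(obj, args->offset, args->size,
+ *					 I915_GEM_DOMAIN_CPU, 0);
+ *
+ * so that only the pages actually being read are clflushed, rather than
+ * the whole object.
+ */
+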
+
+/**
+ * Once all of the objects have been set in the proper domain,
+ * perform the necessary flush and invalidate operations.
+ *
+ * Returns the write domains flushed, for use in flush tracking.
+ */
+static uint32_t
+i915_gem_dev_set_domain(struct drm_device *dev)
+{
+ uint32_t flush_domains = dev->flush_domains;
+
+ /*
+ * Now that all the buffers are synced to the proper domains,
+ * flush and invalidate the collected domains
+ */
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev,
+ dev->invalidate_domains,
+ dev->flush_domains);
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+ }
+
+ return flush_domains;
+}
+
+/**
+ * Pin an object to the GTT and evaluate the relocations landing in it.
+ */
+static int
+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object *entry)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_relocation_entry reloc;
+ struct drm_i915_gem_relocation_entry __user *relocs;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i, ret;
+ uint32_t last_reloc_offset = -1;
+ void *reloc_page = NULL;
+
+ /* Choose the GTT offset for our buffer and put it there. */
+ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+ if (ret)
+ return ret;
+
+ entry->offset = obj_priv->gtt_offset;
+
+ relocs = (struct drm_i915_gem_relocation_entry __user *)
+ (uintptr_t) entry->relocs_ptr;
+ /* Apply the relocations, using the GTT aperture to avoid cache
+ * flushing requirements.
+ */
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_obj_priv;
+ uint32_t reloc_val, reloc_offset, *reloc_entry;
+ int ret;
+
+ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
+			return -EFAULT;
+ }
+
+ target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+ reloc.target_handle);
+ if (target_obj == NULL) {
+ i915_gem_object_unpin(obj);
+ return -EBADF;
+ }
+ target_obj_priv = target_obj->driver_private;
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_obj_priv->gtt_space == NULL) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc.target_handle);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+ if (reloc.offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset, (int) obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+ if (reloc.offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+ if (reloc.write_domain && target_obj->pending_write_domain &&
+ reloc.write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.write_domain,
+ target_obj->pending_write_domain);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc.offset,
+ (int) reloc.target_handle,
+ (int) reloc.read_domains,
+ (int) reloc.write_domain,
+ (int) target_obj_priv->gtt_offset,
+ (int) reloc.presumed_offset,
+ reloc.delta);
+#endif
+
+ target_obj->pending_read_domains |= reloc.read_domains;
+ target_obj->pending_write_domain |= reloc.write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+ drm_gem_object_unreference(target_obj);
+ continue;
+ }
+
+ /* Now that we're going to actually write some data in,
+ * make sure that any rendering using this buffer's contents
+ * is completed.
+ */
+ i915_gem_object_wait_rendering(obj);
+
+ /* As we're writing through the gtt, flush
+ * any CPU writes before we write the relocations
+ */
+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+ }
+
+ /* Map the page containing the relocation we're going to
+ * perform.
+ */
+ reloc_offset = obj_priv->gtt_offset + reloc.offset;
+ if (reloc_page == NULL ||
+ (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
+ (reloc_offset & ~(PAGE_SIZE - 1))) {
+ if (reloc_page != NULL)
+ iounmap(reloc_page);
+
+ reloc_page = ioremap(dev->agp->base +
+ (reloc_offset & ~(PAGE_SIZE - 1)),
+ PAGE_SIZE);
+ last_reloc_offset = reloc_offset;
+ if (reloc_page == NULL) {
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -ENOMEM;
+ }
+ }
+
+ reloc_entry = (uint32_t *)((char *)reloc_page +
+ (reloc_offset & (PAGE_SIZE - 1)));
+ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+
+#if WATCH_BUF
+ DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
+ obj, (unsigned int) reloc.offset,
+ readl(reloc_entry), reloc_val);
+#endif
+ writel(reloc_val, reloc_entry);
+
+ /* Write the updated presumed offset for this entry back out
+ * to the user.
+ */
+ reloc.presumed_offset = target_obj_priv->gtt_offset;
+ ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
+ if (ret != 0) {
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+			return -EFAULT;
+ }
+
+ drm_gem_object_unreference(target_obj);
+ }
+
+ if (reloc_page != NULL)
+ iounmap(reloc_page);
+
+#if WATCH_BUF
+ if (0)
+ i915_gem_dump_object(obj, 128, __func__, ~0);
+#endif
+ return 0;
+}
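+
+/* Illustrative userspace view (a sketch, not part of the original patch):
+ * a relocation entry asking the kernel to patch the dword at byte offset
+ * 16 of this buffer with the target object's GTT address plus 256, where
+ * "target" is a hypothetical GEM handle:
+ *
+ *	struct drm_i915_gem_relocation_entry reloc = {
+ *		.target_handle	 = target,
+ *		.offset		 = 16,
+ *		.delta		 = 256,
+ *		.read_domains	 = I915_GEM_DOMAIN_RENDER,
+ *		.write_domain	 = I915_GEM_DOMAIN_RENDER,
+ *		.presumed_offset = 0,
+ *	};
+ *
+ * If presumed_offset already matches the target's real GTT offset, the
+ * loop above skips the GTT write entirely.
+ */
+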
+
+/** Dispatch a batchbuffer to the ring
+ */
+static int
+i915_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer *exec,
+ uint64_t exec_offset)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+ (uintptr_t) exec->cliprects_ptr;
+ int nbox = exec->num_cliprects;
+ int i = 0, count;
+ uint32_t exec_start, exec_len;
+ RING_LOCALS;
+
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ exec_len = (uint32_t) exec->batch_len;
+
+ if ((exec_start | exec_len) & 0x7) {
+ DRM_ERROR("alignment\n");
+ return -EINVAL;
+ }
+
+ if (!exec_start)
+ return -EINVAL;
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box(dev, boxes, i,
+ exec->DR1, exec->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ OUT_RING(exec_start + exec_len - 4);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(2);
+ if (IS_I965G(dev)) {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ OUT_RING(exec_start);
+ } else {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6));
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ }
+ ADVANCE_LP_RING();
+ }
+ }
+
+ /* XXX breadcrumb */
+ return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ int ret = 0;
+ uint32_t seqno;
+
+ mutex_lock(&dev->struct_mutex);
+ seqno = i915_file_priv->mm.last_gem_throttle_seqno;
+ i915_file_priv->mm.last_gem_throttle_seqno =
+ i915_file_priv->mm.last_gem_seqno;
+ if (seqno)
+ ret = i915_wait_request(dev, seqno);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_gem_object **object_list = NULL;
+ struct drm_gem_object *batch_obj;
+ int ret, i, pinned = 0;
+ uint64_t exec_offset;
+ uint32_t seqno, flush_domains;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
+ DRM_MEM_DRIVER);
+ object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
+ DRM_MEM_DRIVER);
+ if (exec_list == NULL || object_list == NULL) {
+ DRM_ERROR("Failed to allocate exec or object list "
+ "for %d buffers\n",
+ args->buffer_count);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+ ret = copy_from_user(exec_list,
+			     (struct drm_i915_gem_exec_object __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		ret = -EFAULT;
+		goto pre_mutex_err;
+	}
+
+ mutex_lock(&dev->struct_mutex);
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ if (dev_priv->mm.wedged) {
+ DRM_ERROR("Execbuf while wedged\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EIO;
+ }
+
+ if (dev_priv->mm.suspended) {
+ DRM_ERROR("Execbuf while VT-switched.\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
+ }
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as each object is bound to the
+	 * GTT.
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+
+ /* Look up object handles and perform the relocations */
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i] = drm_gem_object_lookup(dev, file_priv,
+ exec_list[i].handle);
+ if (object_list[i] == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec_list[i].handle, i);
+ ret = -EBADF;
+ goto err;
+ }
+
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ ret = i915_gem_object_pin_and_relocate(object_list[i],
+ file_priv,
+ &exec_list[i]);
+ if (ret) {
+ DRM_ERROR("object bind and relocate failed %d\n", ret);
+ goto err;
+ }
+ pinned = i + 1;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ batch_obj = object_list[args->buffer_count-1];
+ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+ batch_obj->pending_write_domain = 0;
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (obj_priv->gtt_space == NULL) {
+			/* We evicted the buffer in the process of validating
+			 * our set of buffers. We could try to recover by
+			 * kicking everything out and trying again from
+			 * the start.
+ */
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* make sure all previous memory operations have passed */
+ ret = i915_gem_object_set_domain(obj,
+ obj->pending_read_domains,
+ obj->pending_write_domain);
+ if (ret)
+ goto err;
+ }
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ /* Flush/invalidate caches and chipset buffer */
+ flush_domains = i915_gem_dev_set_domain(dev);
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+#if WATCH_COHERENCY
+ for (i = 0; i < args->buffer_count; i++) {
+ i915_gem_object_check_coherency(object_list[i],
+ exec_list[i].handle);
+ }
+#endif
+
+ exec_offset = exec_list[args->buffer_count - 1].offset;
+
+#if WATCH_EXEC
+ i915_gem_dump_object(object_list[args->buffer_count - 1],
+ args->batch_len,
+ __func__,
+ ~0);
+#endif
+
+ /* Exec the batchbuffer */
+ ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+ if (ret) {
+ DRM_ERROR("dispatch failed %d\n", ret);
+ goto err;
+ }
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires
+ */
+ flush_domains |= i915_retire_commands(dev);
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ /*
+ * Get a seqno representing the execution of the current buffer,
+ * which we can wait on. We would like to mitigate these interrupts,
+ * likely by only creating seqnos occasionally (so that we have
+ * *some* interrupts representing completion of buffers that we can
+ * wait on when trying to clear up gtt space).
+ */
+ seqno = i915_add_request(dev, flush_domains);
+ BUG_ON(seqno == 0);
+ i915_file_priv->mm.last_gem_seqno = seqno;
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ i915_gem_object_move_to_active(obj);
+ obj_priv->last_rendering_seqno = seqno;
+#if WATCH_LRU
+ DRM_INFO("%s: move to exec list %p\n", __func__, obj);
+#endif
+ }
+#if WATCH_LRU
+ i915_dump_lru(dev, __func__);
+#endif
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ /* Copy the new buffer offsets back to the user's exec list. */
+	ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+	if (ret) {
+		DRM_ERROR("failed to copy %d exec entries "
+			  "back to user (%d)\n",
+			  args->buffer_count, ret);
+		ret = -EFAULT;
+	}
+err:
+ if (object_list != NULL) {
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+
+ for (i = 0; i < args->buffer_count; i++)
+ drm_gem_object_unreference(object_list[i]);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+ drm_free(object_list, sizeof(*object_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+ drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+
+ return ret;
+}
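+
+/* Illustrative userspace view (a sketch, not part of the original patch):
+ * the batch buffer must be the last entry in the exec list, e.g. for one
+ * render target plus the batch referencing it:
+ *
+ *	struct drm_i915_gem_exec_object objs[2] = {
+ *		{ .handle = target, .relocation_count = 0 },
+ *		{ .handle = batch,  .relocation_count = 1,
+ *		  .relocs_ptr = (uintptr_t) &reloc },
+ *	};
+ *
+ * with "target", "batch" and "reloc" as in the relocation sketch above.
+ */
+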
+
+int
+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+ if (obj_priv->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, alignment);
+ if (ret != 0) {
+			DRM_ERROR("Failure to bind: %d\n", ret);
+ return ret;
+ }
+ }
+ obj_priv->pin_count++;
+
+ /* If the object is not active and not pending a flush,
+ * remove it from the inactive list
+ */
+ if (obj_priv->pin_count == 1) {
+ atomic_inc(&dev->pin_count);
+ atomic_add(obj->size, &dev->pin_memory);
+ if (!obj_priv->active &&
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) == 0 &&
+ !list_empty(&obj_priv->list))
+ list_del_init(&obj_priv->list);
+ }
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+ obj_priv->pin_count--;
+ BUG_ON(obj_priv->pin_count < 0);
+ BUG_ON(obj_priv->gtt_space == NULL);
+
+ /* If the object is no longer pinned, and is
+ * neither active nor being flushed, then stick it on
+ * the inactive list
+ */
+ if (obj_priv->pin_count == 0) {
+ if (!obj_priv->active &&
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) == 0)
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.inactive_list);
+ atomic_dec(&dev->pin_count);
+ atomic_sub(obj->size, &dev->pin_memory);
+ }
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
+int
+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pin *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBADF;
+ }
+ obj_priv = obj->driver_private;
+
+ ret = i915_gem_object_pin(obj, args->alignment);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /* XXX - flush the CPU caches for pinned objects
+ * as the X server doesn't manage domains yet
+ */
+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+ }
+ args->offset = obj_priv->gtt_offset;
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int
+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pin *args = data;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBADF;
+ }
+
+ i915_gem_object_unpin(obj);
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_busy *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBADF;
+ }
+
+ obj_priv = obj->driver_private;
+ args->busy = obj_priv->active;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return i915_gem_ring_throttle(dev, file_priv);
+}
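+
+/* Usage sketch (illustrative, not part of the original patch): a client
+ * bounds its rendering latency by throttling once per frame, assuming the
+ * ioctl number this handler is wired to in i915_drm.h:
+ *
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
+ *
+ * The first call records a seqno and returns immediately; each later call
+ * blocks until the seqno recorded by the previous call has been retired.
+ */
+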
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+ if (obj_priv == NULL)
+ return -ENOMEM;
+
+ /*
+ * We've just allocated pages from the kernel,
+ * so they've just been written by the CPU with
+ * zeros. They'll need to be clflushed before we
+ * use them with the GPU.
+ */
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+
+ obj->driver_private = obj_priv;
+ obj_priv->obj = obj;
+ INIT_LIST_HEAD(&obj_priv->list);
+ return 0;
+}
+
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ while (obj_priv->pin_count > 0)
+ i915_gem_object_unpin(obj);
+
+ i915_gem_object_unbind(obj);
+
+ drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+}
+
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+ uint32_t flush_domains;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
+ if (ret)
+ return ret;
+ flush_domains = i915_gem_dev_set_domain(obj->dev);
+
+ if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
+ (void) i915_add_request(dev, flush_domains);
+
+ return 0;
+}
+
+/** Unbinds all objects that are on the given buffer list. */
+static int
+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+{
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ while (!list_empty(head)) {
+ obj_priv = list_first_entry(head,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+		if (obj_priv->pin_count != 0) {
+			DRM_ERROR("Pinned object in unbind list\n");
+			return -EINVAL;
+		}
+
+		ret = i915_gem_object_unbind(obj);
+		if (ret != 0) {
+			DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
+				  ret);
+			return ret;
+		}
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_idle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno, cur_seqno, last_seqno;
+ int stuck;
+
+ if (dev_priv->mm.suspended)
+ return 0;
+
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ */
+ dev_priv->mm.suspended = 1;
+
+ i915_kernel_lost_context(dev);
+
+ /* Flush the GPU along with all non-CPU write domains
+ */
+ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
+ ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT));
+
+	if (seqno == 0)
+		return -ENOMEM;
+
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ last_seqno = 0;
+ stuck = 0;
+ for (;;) {
+ cur_seqno = i915_get_gem_seqno(dev);
+ if (i915_seqno_passed(cur_seqno, seqno))
+ break;
+ if (last_seqno == cur_seqno) {
+ if (stuck++ > 100) {
+ DRM_ERROR("hardware wedged\n");
+ dev_priv->mm.wedged = 1;
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ break;
+ }
+ }
+ msleep(10);
+ last_seqno = cur_seqno;
+ }
+ dev_priv->mm.waiting_gem_seqno = 0;
+
+ i915_gem_retire_requests(dev);
+
+ /* Active and flushing should now be empty as we've
+ * waited for a sequence higher than any pending execbuffer
+ */
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+	/* The request list should now be empty as we've also waited
+	 * for the last request in the list.
+	 */
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+ /* Move all buffers out of the GTT. */
+ i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ return 0;
+}
+
+static int
+i915_gem_init_hws(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ /* If we need a physical address for the status page, it's already
+ * initialized at driver load time.
+ */
+ if (!I915_NEED_GFX_HWS(dev))
+ return 0;
+
+ obj = drm_gem_object_alloc(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ return -ENOMEM;
+ }
+ obj_priv = obj->driver_private;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ return ret;
+ }
+
+ dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
+ dev_priv->hws_map.size = 4096;
+ dev_priv->hws_map.type = 0;
+ dev_priv->hws_map.flags = 0;
+ dev_priv->hws_map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_map.handle == NULL) {
+ DRM_ERROR("Failed to map status page.\n");
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+ dev_priv->hws_obj = obj;
+ dev_priv->hw_status_page = dev_priv->hws_map.handle;
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+
+ return 0;
+}
+
+static int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ ret = i915_gem_init_hws(dev);
+ if (ret != 0)
+ return ret;
+
+ obj = drm_gem_object_alloc(dev, 128 * 1024);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ return -ENOMEM;
+ }
+ obj_priv = obj->driver_private;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ return ret;
+ }
+
+ /* Set up the kernel mapping for the ring. */
+ dev_priv->ring.Size = obj->size;
+ dev_priv->ring.tail_mask = obj->size - 1;
+
+ dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
+ dev_priv->ring.map.size = obj->size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->ring.map, dev);
+ if (dev_priv->ring.map.handle == NULL) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+ dev_priv->ring.ring_obj = obj;
+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+ /* Stop the ring if it's running. */
+ I915_WRITE(PRB0_CTL, 0);
+ I915_WRITE(PRB0_HEAD, 0);
+ I915_WRITE(PRB0_TAIL, 0);
+ I915_WRITE(PRB0_START, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+ I915_WRITE(PRB0_CTL,
+ ((obj->size - 4096) & RING_NR_PAGES) |
+ RING_NO_REPORT |
+ RING_VALID);
+
+ /* Update our cache of the ring state */
+ i915_kernel_lost_context(dev);
+
+ return 0;
+}
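+
+/* Illustrative note (not part of the original patch): PRB0_CTL encodes
+ * the ring length as a page count minus one in the RING_NR_PAGES field,
+ * so for the 128KB object above
+ *
+ *	(128 * 1024 - 4096) & RING_NR_PAGES
+ *
+ * programs a 32-page (128KB) ring with hardware reporting disabled.
+ */
+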
+
+static void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->ring.ring_obj == NULL)
+ return;
+
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+
+ i915_gem_object_unpin(dev_priv->ring.ring_obj);
+ drm_gem_object_unreference(dev_priv->ring.ring_obj);
+ dev_priv->ring.ring_obj = NULL;
+ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+
+ if (dev_priv->hws_obj != NULL) {
+ i915_gem_object_unpin(dev_priv->hws_obj);
+ drm_gem_object_unreference(dev_priv->hws_obj);
+ dev_priv->hws_obj = NULL;
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+
+ /* Write high address into HWS_PGA when disabling. */
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+ }
+}
+
+int
+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv->mm.wedged) {
+ DRM_ERROR("Reenabling wedged hardware, good luck\n");
+ dev_priv->mm.wedged = 0;
+ }
+
+ ret = i915_gem_init_ringbuffer(dev);
+ if (ret != 0)
+ return ret;
+
+ mutex_lock(&dev->struct_mutex);
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ dev_priv->mm.suspended = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_idle(dev);
+ if (ret == 0)
+ i915_gem_cleanup_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+ int ret;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (dev_priv->ring.ring_obj != NULL) {
+ ret = i915_gem_idle(dev);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+
+ i915_gem_cleanup_ringbuffer(dev);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
+void i915_gem_load(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ i915_gem_retire_work_handler);
+ dev_priv->mm.next_gem_seqno = 1;
+
+ i915_gem_detect_bit_6_swizzle(dev);
+}
diff --git a/linux-core/i915_gem_proc.c b/linux-core/i915_gem_proc.c
new file mode 100644
index 00000000..132eb3d1
--- /dev/null
+++ b/linux-core/i915_gem_proc.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_compat.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static int i915_gem_active_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Active:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n",
+ obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Flushing:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Inactive:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_request_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *gem_request;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Request:\n");
+ list_for_each_entry(gem_request, &dev_priv->mm.request_list,
+ list)
+ {
+ DRM_PROC_PRINT(" %d @ %d %08x\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies),
+ gem_request->flush_domains);
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+ DRM_PROC_PRINT("Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+
+static int i915_interrupt_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Interrupt enable: %08x\n",
+ I915_READ(IER));
+ DRM_PROC_PRINT("Interrupt identity: %08x\n",
+ I915_READ(IIR));
+ DRM_PROC_PRINT("Interrupt mask: %08x\n",
+ I915_READ(IMR));
+ DRM_PROC_PRINT("Pipe A stat: %08x\n",
+ I915_READ(PIPEASTAT));
+ DRM_PROC_PRINT("Pipe B stat: %08x\n",
+ I915_READ(PIPEBSTAT));
+ DRM_PROC_PRINT("Interrupts received: %d\n",
+ atomic_read(&dev_priv->irq_received));
+ DRM_PROC_PRINT("Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ DRM_PROC_PRINT("Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ DRM_PROC_PRINT("IRQ sequence: %d\n",
+ dev_priv->mm.irq_gem_seqno);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static struct drm_proc_list {
+ /** file name */
+ const char *name;
+	/** proc callback */
+ int (*f) (char *, char **, off_t, int, int *, void *);
+} i915_gem_proc_list[] = {
+ {"i915_gem_active", i915_gem_active_info},
+ {"i915_gem_flushing", i915_gem_flushing_info},
+ {"i915_gem_inactive", i915_gem_inactive_info},
+ {"i915_gem_request", i915_gem_request_info},
+ {"i915_gem_seqno", i915_gem_seqno_info},
+ {"i915_gem_interrupt", i915_interrupt_info},
+};
+
+#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
+
+int i915_gem_proc_init(struct drm_minor *minor)
+{
+ struct proc_dir_entry *ent;
+ int i, j;
+
+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
+ ent = create_proc_entry(i915_gem_proc_list[i].name,
+ S_IFREG | S_IRUGO, minor->dev_root);
+ if (!ent) {
+ DRM_ERROR("Cannot create /proc/dri/.../%s\n",
+ i915_gem_proc_list[i].name);
+ for (j = 0; j < i; j++)
+				remove_proc_entry(i915_gem_proc_list[j].name,
+ minor->dev_root);
+ return -1;
+ }
+ ent->read_proc = i915_gem_proc_list[i].f;
+ ent->data = minor;
+ }
+ return 0;
+}
+
+void i915_gem_proc_cleanup(struct drm_minor *minor)
+{
+ int i;
+
+ if (!minor->dev_root)
+ return;
+
+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
+ remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
+}
diff --git a/linux-core/i915_gem_tiling.c b/linux-core/i915_gem_tiling.c
new file mode 100644
index 00000000..90029192
--- /dev/null
+++ b/linux-core/i915_gem_tiling.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
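+
+/* Illustrative sketch (not part of the original patch; the helper name is
+ * hypothetical): for the I915_BIT_6_SWIZZLE_9_10 mode described above,
+ * the CPU-side address transform is a pure function of the address:
+ *
+ *	static inline unsigned long i915_swizzle_9_10(unsigned long addr)
+ *	{
+ *		return addr ^ ((((addr >> 9) ^ (addr >> 10)) & 1) << 6);
+ *	}
+ *
+ * i.e. bit 6 is flipped whenever bits 9 and 10 disagree, matching the
+ * channel selection the GPU applies to tiled surfaces.
+ */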
+
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ int mchbar_offset;
+ char __iomem *mchbar;
+ int ret;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ if (bridge == NULL) {
+ DRM_ERROR("Couldn't get bridge device\n");
+ return;
+ }
+
+ ret = pci_enable_device(bridge);
+ if (ret != 0) {
+ DRM_ERROR("pci_enable_device failed: %d\n", ret);
+ return;
+ }
+
+ if (IS_I965G(dev))
+ mchbar_offset = 0x48;
+ else
+ mchbar_offset = 0x44;
+
+ /* Use resource 2 for our BAR that's stashed in a nonstandard location,
+ * since the bridge would only ever use standard BARs 0-1 (though it
+ * doesn't anyway)
+ */
+ pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
+
+ mchbar = ioremap(pci_resource_start(bridge, 2),
+ pci_resource_len(bridge, 2));
+ if (mchbar == NULL) {
+ DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
+ return;
+ }
+
+ if (IS_I965G(dev) && !IS_I965GM(dev)) {
+ uint32_t chdecmisc;
+
+ /* On the 965, channel interleave appears to be determined by
+ * the flex bit. If flex is set, then the ranks (sides of a
+ * DIMM) of memory will be "stacked" (physical addresses walk
+ * through one rank then move on to the next, flipping channels
+ * or not depending on rank configuration). The GPU in this
+ * case does exactly the same addressing as the CPU.
+ *
+		 * Unlike the 945, channel randomization does not
+		 * appear to be available.
+ *
+		 * XXX: While the G965 doesn't appear to do any interleaving
+		 * when the DIMMs are not exactly matched, the G4x chipsets
+		 * might do so for "L-shaped" configurations, and will need
+		 * to be detected.
+ *
+ * L-shaped configuration:
+ *
+ * +-----+
+ * | |
+ * |DIMM2| <-- non-interleaved
+ * +-----+
+ * +-----+ +-----+
+ * | | | |
+ * |DIMM0| |DIMM1| <-- interleaved area
+ * +-----+ +-----+
+ */
+ chdecmisc = readb(mchbar + CHDECMISC);
+
+ if (chdecmisc == 0xff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ } else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ } else if (IS_I9XX(dev)) {
+ uint32_t dcc;
+
+ /* On 915-945 and GM965, channel interleave by the CPU is
+ * determined by DCC. The CPU will alternate based on bit 6
+ * in interleaved mode, and the GPU will then also alternate
+ * on bit 6, 9, and 10 for X, but the CPU may also optionally
+ * alternate based on bit 17 (XOR not disabled and XOR
+ * bit == 17).
+ */
+ dcc = readl(mchbar + DCC);
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (IS_I915G(dev) || IS_I915GM(dev) ||
+ dcc & DCC_CHANNEL_XOR_DISABLE) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (IS_I965GM(dev)) {
+ /* GM965 only does bit 11-based channel
+ * randomization
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 or perhaps other swizzling */
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ break;
+ }
+ if (dcc == 0xffffffff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ } else {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+
+ iounmap(mchbar);
+
+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+int
+i915_gem_set_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_set_tiling *args = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+ obj_priv = obj->driver_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (args->tiling_mode == I915_TILING_NONE) {
+ obj_priv->tiling_mode = I915_TILING_NONE;
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ if (args->tiling_mode == I915_TILING_X)
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ else
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ /* If we can't handle the swizzling, make it untiled. */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ args->tiling_mode = I915_TILING_NONE;
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ }
+ }
+ obj_priv->tiling_mode = args->tiling_mode;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_gem_object_unreference(obj);
+
+ return 0;
+}
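+
+/* Usage sketch (illustrative, not part of the original patch), assuming
+ * the DRM_IOCTL_I915_GEM_SET_TILING number from i915_drm.h:
+ *
+ *	struct drm_i915_gem_set_tiling set = {
+ *		.handle = handle,
+ *		.tiling_mode = I915_TILING_X,
+ *	};
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set);
+ *
+ * On return, set.swizzle_mode tells the caller which bits to XOR into
+ * bit 6 when addressing the object's pages with the CPU.
+ */
+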
+
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+int
+i915_gem_get_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_get_tiling *args = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+ obj_priv = obj->driver_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ args->tiling_mode = obj_priv->tiling_mode;
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_X:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ break;
+ case I915_TILING_NONE:
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ default:
+ DRM_ERROR("unknown tiling mode\n");
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_gem_object_unreference(obj);
+
+ return 0;
+}
diff --git a/linux-core/i915_opregion.c b/linux-core/i915_opregion.c
index e691571a..015376f8 100644
--- a/linux-core/i915_opregion.c
+++ b/linux-core/i915_opregion.c
@@ -249,19 +249,20 @@ void opregion_enable_asle(struct drm_device *dev)
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
if (IS_MOBILE(dev)) {
+ u32 pipeb_stats = I915_READ(PIPEBSTAT);
/* Some hardware uses the legacy backlight controller
to signal interrupts, so we need to set up pipe B
to generate an IRQ on writes */
- I915_WRITE(PIPEBSTAT, pipeb_stats |=
- I915_LEGACY_BLC_EVENT_ENABLE);
- dev_priv->irq_enable_reg |=
- (I915_ASLE_INTERRUPT
- | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
- } else
- dev_priv->irq_enable_reg |= I915_ASLE_INTERRUPT;
-
+ pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE;
+ I915_WRITE(PIPEBSTAT, pipeb_stats);
+
+ dev_priv->irq_mask_reg &=
+ ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ }
+
+ dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT;
+
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
asle->ardy = 1;
diff --git a/linux-core/nouveau_bo.c b/linux-core/nouveau_bo.c
index ab3b23a4..86347e03 100644
--- a/linux-core/nouveau_bo.c
+++ b/linux-core/nouveau_bo.c
@@ -229,7 +229,7 @@ out_cleanup:
if (tmp_mem.mm_node) {
mutex_lock(&dev->struct_mutex);
if (tmp_mem.mm_node != bo->pinned_node)
- drm_mm_put_block(tmp_mem.mm_node);
+ drm_memrange_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
index cc4d5a92..81704ea1 100644
--- a/linux-core/nouveau_sgdma.c
+++ b/linux-core/nouveau_sgdma.c
@@ -280,7 +280,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_ttm_backend *be;
struct drm_scatter_gather sgreq;
- struct drm_mm_node mm_node;
+ struct drm_memrange_node mm_node;
struct drm_bo_mem_reg mem;
int ret;
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 7f1ccd1d..94755e4b 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -959,6 +959,31 @@ struct drm_mm_info_arg {
uint64_t p_size;
};
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+
+ /** Returned global name */
+ uint32_t name;
+};
+
+struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+
+ /** Returned handle for the object */
+ uint32_t handle;
+
+ /** Returned size of the object */
+ uint64_t size;
+};
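+
+/* Usage sketch (illustrative, not part of this patch): naming a buffer in
+ * one process and opening it from another:
+ *
+ *	struct drm_gem_flink flink = { .handle = handle };
+ *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
+ *
+ *	struct drm_gem_open op = { .name = flink.name };
+ *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);
+ *
+ * op.handle and op.size then describe the shared object in the second
+ * file descriptor's handle namespace.
+ */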
+
/**
* \name Ioctls Definitions
*/
@@ -978,7 +1003,11 @@ struct drm_mm_info_arg {
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
-#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index e57580fe..1f9fa4f7 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -41,10 +41,14 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+ u32 last_acthd = I915_READ(acthd_reg);
+ u32 acthd;
int i;
- for (i = 0; i < 10000; i++) {
+ for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
@@ -54,13 +58,79 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->head != last_head)
i = 0;
+ if (acthd != last_acthd)
+ i = 0;
+
last_head = ring->head;
- DRM_UDELAY(1);
+ last_acthd = acthd;
+		msleep_interruptible(10);
}
return -EBUSY;
}
+int i915_init_hardware_status(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ /* Program Hardware Status Page */
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+ if (!dev_priv->status_page_dmah) {
+ DRM_ERROR("Can not allocate hardware status page\n");
+ return -ENOMEM;
+ }
+ dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+ I915_WRITE(0x02080, dev_priv->dma_status_page);
+ DRM_DEBUG("Enabled hardware status page\n");
+ return 0;
+}
+
+void i915_free_hardware_status(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (dev_priv->status_page_dmah) {
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ /* Need to rewrite hardware status page */
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
+
+ if (dev_priv->status_gfx_addr) {
+ dev_priv->status_gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
+}
+
+#if I915_RING_VALIDATE
+/**
+ * Validate the cached ring tail value
+ *
+ * If the X server writes to the ring and DRM doesn't
+ * reload the head and tail pointers, it will end up writing
+ * data to the wrong place in the ring, causing havoc.
+ */
+void i915_ring_validate(struct drm_device *dev, const char *func, int line)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ u32 tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
+ u32 head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;
+
+ if (tail != ring->tail) {
+ DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
+ func, line,
+ ring->head, head, ring->tail, tail);
+ BUG_ON(1);
+ }
+}
+#endif
+
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -80,7 +150,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
- if (dev->irq)
+ if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev_priv->ring.virtual_start) {
@@ -90,18 +160,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
dev_priv->ring.map.size = 0;
}
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- /* Need to rewrite hardware status page */
- I915_WRITE(0x02080, 0x1ffff000);
- }
-
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- I915_WRITE(0x02080, 0x1ffff000);
- }
+ if (I915_NEED_GFX_HWS(dev))
+ i915_free_hardware_status(dev);
return 0;
}
@@ -182,14 +242,6 @@ static int i915_initialize(struct drm_device * dev,
return -EINVAL;
}
- if (init->mmio_offset != 0)
- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio_map) {
- i915_dma_cleanup(dev);
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
-
#ifdef I915_HAVE_BUFFER
dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
#endif
@@ -203,27 +255,27 @@ static int i915_initialize(struct drm_device * dev,
dev_priv->sarea_priv = NULL;
}
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+ if (init->ring_size != 0) {
+ dev_priv->ring.Size = init->ring_size;
+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
- dev_priv->ring.map.offset = init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
+ dev_priv->ring.map.offset = init->ring_start;
+ dev_priv->ring.map.size = init->ring_size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
- drm_core_ioremap(&dev_priv->ring.map, dev);
+ drm_core_ioremap(&dev_priv->ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
- i915_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
+ if (dev_priv->ring.map.handle == NULL) {
+ i915_dma_cleanup(dev);
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+ }
dev_priv->cpp = init->cpp;
@@ -233,9 +285,6 @@ static int i915_initialize(struct drm_device * dev,
/* We are using separate values as placeholders for mechanisms for
* private backbuffer/depthbuffer usage.
*/
- dev_priv->use_mi_batchbuffer_start = 0;
- if (IS_I965G(dev)) /* 965 doesn't support older method */
- dev_priv->use_mi_batchbuffer_start = 1;
/* Allow hardware batchbuffers unless told otherwise.
*/
@@ -245,24 +294,6 @@ static int i915_initialize(struct drm_device * dev,
*/
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
- /* Program Hardware Status Page */
- if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
- if (!dev_priv->status_page_dmah) {
- i915_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
- I915_WRITE(0x02080, dev_priv->dma_status_page);
- }
- DRM_DEBUG("Enabled hardware status page\n");
#ifdef I915_HAVE_BUFFER
mutex_init(&dev_priv->cmdbuf_mutex);
#endif
@@ -291,11 +322,6 @@ static int i915_dma_resume(struct drm_device * dev)
return -EINVAL;
}
- if (!dev_priv->mmio_map) {
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
-
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -459,9 +485,9 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
return 0;
}
-static int i915_emit_box(struct drm_device * dev,
- struct drm_clip_rect __user * boxes,
- int i, int DR1, int DR4)
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box;
@@ -517,7 +543,7 @@ void i915_emit_breadcrumb(struct drm_device *dev)
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(20);
+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
@@ -610,7 +636,14 @@ int i915_dispatch_batchbuffer(struct drm_device * dev,
return ret;
}
- if (dev_priv->use_mi_batchbuffer_start) {
+ if (IS_I830(dev) || IS_845G(dev)) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ OUT_RING(batch->start + batch->used - 4);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
BEGIN_LP_RING(2);
if (IS_I965G(dev)) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
@@ -620,14 +653,6 @@ int i915_dispatch_batchbuffer(struct drm_device * dev,
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
ADVANCE_LP_RING();
-
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- OUT_RING(batch->start + batch->used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
}
}
@@ -715,9 +740,19 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
int i915_quiescent(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
i915_kernel_lost_context(dev);
- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ if (ret)
+ {
+ i915_kernel_lost_context (dev);
+ DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
+ dev_priv->ring.head,
+ dev_priv->ring.tail,
+ dev_priv->ring.space);
+ }
+ return ret;
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -854,7 +889,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
- value = dev->irq ? 1 : 0;
+ value = dev->irq_enabled ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -891,8 +926,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
- if (!IS_I965G(dev))
- dev_priv->use_mi_batchbuffer_start = param->value;
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
dev_priv->tex_lru_log_granularity = param->value;
@@ -1026,6 +1059,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
memset(dev_priv, 0, sizeof(drm_i915_private_t));
dev->dev_private = (void *)dev_priv;
+ dev_priv->dev = dev;
/* Add register map (needed for suspend/resume) */
base = drm_get_resource_start(dev, mmio_bar);
@@ -1034,6 +1068,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
_DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
+ i915_gem_load(dev);
+
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_init_chipset_flush_compat(dev);
@@ -1041,6 +1077,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_opregion_init(dev);
#endif
+ /* Init HWS */
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = i915_init_hardware_status(dev);
+ if(ret)
+ return ret;
+ }
+
I915_WRITE16(HWSTAM, 0xeffe);
I915_WRITE16(IMR, 0x0);
I915_WRITE16(IER, 0x0);
@@ -1051,7 +1094,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
- dev_priv->irq_enable_reg = 0;
+ dev_priv->irq_mask_reg = ~0;
ret = drm_vblank_init(dev, num_pipes);
if (ret)
@@ -1093,8 +1136,9 @@ int i915_driver_unload(struct drm_device *dev)
I915_WRITE(IIR, temp);
}
- if (dev_priv->mmio_map)
- drm_rmmap(dev, dev_priv->mmio_map);
+ i915_free_hardware_status(dev);
+
+ drm_rmmap(dev, dev_priv->mmio_map);
#ifdef __linux__
intel_opregion_free(dev);
@@ -1124,6 +1168,7 @@ void i915_driver_lastclose(struct drm_device * dev)
dev_priv->val_bufs = NULL;
}
#endif
+ i915_gem_lastclose(dev);
if (drm_getsarea(dev) && dev_priv->sarea_priv)
i915_do_cleanup_pageflip(dev);
@@ -1147,12 +1192,38 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_dma_cleanup(dev);
}
+int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv;
+
+ DRM_DEBUG("\n");
+ i915_file_priv = (struct drm_i915_file_private *)
+ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
+
+ if (!i915_file_priv)
+ return -ENOMEM;
+
+ file_priv->driver_priv = i915_file_priv;
+
+ i915_file_priv->mm.last_gem_seqno = 0;
+ i915_file_priv->mm.last_gem_throttle_seqno = 0;
+
+ return 0;
+}
+
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+}
+
struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
@@ -1175,6 +1246,22 @@ struct drm_ioctl_desc i915_ioctls[] = {
#ifdef I915_HAVE_BUFFER
DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
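
Most of the driver ioctls registered above map one-to-one onto the structures added to i915_drm.h below. As one example of the calling convention, here is a sketch of polling the new busy ioctl until rendering to a buffer has retired (hypothetical helper, not part of this commit; a real caller would bound the loop or block rather than spin):

#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

static void gem_wait_idle(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	for (;;) {
		if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
			return;		/* e.g. EBADF on a stale handle */
		if (!busy.busy)
			return;		/* the GPU is done with the buffer */
		usleep(1000);		/* back off before re-polling */
	}
}
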
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 97e77428..9feffeb5 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -176,6 +176,22 @@ typedef struct drm_i915_sarea {
#define DRM_I915_MMIO 0x10
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_EXECBUFFER 0x12
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_ENTERVT 0x19
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
+#define DRM_I915_GEM_SW_FINISH 0x20
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -195,6 +211,22 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
/* Asynchronous page flipping:
*/
@@ -395,4 +427,292 @@ struct drm_i915_execbuffer {
struct drm_fence_arg fence_arg;
};
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_end;
+};
+
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /** Returned pointer the data was mapped at */
+ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+ /** Handle for the object */
+ uint32_t handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+ * It's appealing to make this an index into the mm_validate_entry
+ * list to refer to the buffer, but using the handle instead lets the
+ * driver build a relocation list for state buffers once and not
+ * rewrite it per exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ uint32_t delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+};
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT 0x00000040
+/** @} */
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ uint32_t handle;
+
+ /** Number of relocations to be performed on this buffer */
+ uint32_t relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ uint64_t relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated, with the relocations to be
+ * performed on them.
+ *
+ * This is a pointer to an array of struct drm_i915_gem_exec_object.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ uint64_t buffers_ptr;
+ uint32_t buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ uint32_t batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+};
+
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ uint32_t handle;
+ uint32_t pad;
+
+ /** alignment required within the aperture */
+ uint64_t alignment;
+
+ /** Returned GTT offset of the buffer. */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ uint32_t handle;
+
+ /** Return busy status (1 if busy, 0 if idle) */
+ uint32_t busy;
+};
+
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ uint32_t handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ uint32_t stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ uint32_t handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
+
#endif /* _I915_DRM_H_ */
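
Taken together, the exec_object and relocation_entry structures describe a complete submission: userspace lists every buffer the batch touches, and for each buffer the relocations the kernel must patch once final GTT offsets are known. A sketch of the simplest case, one batch referencing one target buffer (all names are illustrative; error handling elided):

#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

static int gem_exec_one(int fd, uint32_t batch_handle, uint32_t batch_len,
			uint32_t target_handle, uint32_t reloc_offset)
{
	struct drm_i915_gem_relocation_entry reloc = {
		.target_handle	 = target_handle,
		.offset		 = reloc_offset, /* where the batch stores the address */
		.presumed_offset = 0,		 /* kernel writes the real offset back */
		.read_domains	 = I915_GEM_DOMAIN_RENDER,
		.write_domain	 = I915_GEM_DOMAIN_RENDER,
	};
	/* Targets must appear before the buffers whose relocations
	 * point at them, per the buffers_ptr ordering rule above.
	 */
	struct drm_i915_gem_exec_object objs[2] = {
		{ .handle = target_handle },
		{ .handle = batch_handle,
		  .relocation_count = 1,
		  .relocs_ptr = (uintptr_t)&reloc },
	};
	struct drm_i915_gem_execbuffer execbuf = {
		.buffers_ptr  = (uintptr_t)objs,
		.buffer_count = 2,
		.batch_len    = batch_len,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
}
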
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index a77fcf04..9d3a37df 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -37,7 +37,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080312"
+#define DRIVER_DATE "20080611"
#if defined(__linux__)
#define I915_HAVE_FENCE
@@ -61,7 +61,7 @@
*/
#define DRIVER_MAJOR 1
#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-#define DRIVER_MINOR 13
+#define DRIVER_MINOR 14
#else
#define DRIVER_MINOR 6
#endif
@@ -77,16 +77,23 @@ enum pipe {
struct drm_i915_validate_buffer;
#endif
+#define WATCH_COHERENCY 0
+#define WATCH_BUF 0
+#define WATCH_EXEC 0
+#define WATCH_LRU 0
+#define WATCH_RELOC 0
+#define WATCH_INACTIVE 0
+#define WATCH_PWRITE 0
+
typedef struct _drm_i915_ring_buffer {
int tail_mask;
- unsigned long Start;
- unsigned long End;
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
drm_local_map_t map;
+ struct drm_gem_object *ring_obj;
} drm_i915_ring_buffer_t;
struct mem_block {
@@ -122,6 +129,8 @@ struct intel_opregion {
#endif
typedef struct drm_i915_private {
+ struct drm_device *dev;
+
drm_local_map_t *sarea;
drm_local_map_t *mmio_map;
@@ -134,13 +143,12 @@ typedef struct drm_i915_private {
uint32_t counter;
unsigned int status_gfx_addr;
drm_local_map_t hws_map;
+ struct drm_gem_object *hws_obj;
unsigned int cpp;
- int use_mi_batchbuffer_start;
wait_queue_head_t irq_queue;
atomic_t irq_received;
- atomic_t irq_emitted;
int tex_lru_log_granularity;
int allow_batchbuffer;
@@ -150,7 +158,7 @@ typedef struct drm_i915_private {
DRM_SPINTYPE user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
- uint32_t irq_enable_reg;
+ uint32_t irq_mask_reg;
int irq_enabled;
#ifdef I915_HAVE_FENCE
@@ -267,8 +275,97 @@ typedef struct drm_i915_private {
u8 saveDACMASK;
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
+
+ struct {
+ struct drm_memrange gtt_space;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ uint32_t next_gem_seqno;
+
+ /**
+ * Waiting sequence number, if any
+ */
+ uint32_t waiting_gem_seqno;
+
+ /**
+ * Last seq seen at irq time
+ */
+ uint32_t irq_gem_seqno;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /**
+ * Flag if the hardware appears to be wedged.
+ *
+ * This is set when attempts to idle the device time out.
+ * It prevents command submission from occurring and makes
+ * every pending request fail.
+ */
+ int wedged;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+ } mm;
} drm_i915_private_t;
+struct drm_i915_file_private {
+ struct {
+ uint32_t last_gem_seqno;
+ uint32_t last_gem_throttle_seqno;
+ } mm;
+};
+
enum intel_chip_family {
CHIP_I8XX = 0x01,
CHIP_I9XX = 0x02,
@@ -276,6 +373,83 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+ struct drm_gem_object *obj;
+
+ /** Current space allocated to this object in the GTT, if any. */
+ struct drm_memrange_node *gtt_space;
+
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head list;
+
+ /**
+ * This is set if the object is on the active or flushing lists
+ * (has pending rendering), and is not set if it's on inactive (ready
+ * to be unbound).
+ */
+ int active;
+
+ /**
+ * This is set if the object has been written to since last bound
+ * to the GTT
+ */
+ int dirty;
+
+ /** AGP memory structure for our GTT binding. */
+ DRM_AGP_MEM *agp_mem;
+
+ struct page **page_list;
+
+ /**
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
+ */
+ uint32_t gtt_offset;
+
+ /** Boolean whether this object has a valid gtt offset. */
+ int gtt_bound;
+
+ /** How many users have pinned this object in GTT space */
+ int pin_count;
+
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
+
+ /** Current tiling mode for the object. */
+ uint32_t tiling_mode;
+
+ /**
+ * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
+ * GEM_DOMAIN_CPU is not in the object's read domain.
+ */
+ uint8_t *page_cpu_valid;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+ /** GEM sequence number associated with this request. */
+ uint32_t seqno;
+
+ /** Time at which this request was emitted, in jiffies. */
+ unsigned long emitted_jiffies;
+
+ /** Cache domains that were flushed at the start of the request. */
+ uint32_t flush_domains;
+
+ struct list_head list;
+};
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
@@ -284,8 +458,11 @@ extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern void i915_driver_lastclose(struct drm_device * dev);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@@ -297,6 +474,10 @@ extern int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch);
extern int i915_quiescent(struct drm_device *dev);
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4);
+
/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -313,6 +494,7 @@ extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_emit_irq(struct drm_device * dev);
+extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
@@ -361,8 +543,68 @@ void i915_flush_ttm(struct drm_ttm *ttm);
/* i915_execbuf.c */
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_set_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_get_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void i915_gem_load(struct drm_device *dev);
+int i915_gem_proc_init(struct drm_minor *minor);
+void i915_gem_proc_cleanup(struct drm_minor *minor);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+void i915_gem_object_unpin(struct drm_gem_object *obj);
+void i915_gem_lastclose(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_clflush_object(struct drm_gem_object *obj);
+#endif
+/* i915_gem_tiling.c */
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+#if WATCH_INACTIVE
+void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#else
+#define i915_verify_inactive(dev,file,line)
#endif
+void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+void i915_dump_lru(struct drm_device *dev, const char *where);
#ifdef __linux__
/* i915_opregion.c */
@@ -391,16 +633,25 @@ typedef boolean_t bool;
#endif
#define I915_VERBOSE 0
+#define I915_RING_VALIDATE 0
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
+#if I915_RING_VALIDATE
+void i915_ring_validate(struct drm_device *dev, const char *func, int line);
+#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
+#else
+#define I915_RING_DO_VALIDATE(dev)
+#endif
+
#define BEGIN_LP_RING(n) do { \
if (I915_VERBOSE) \
DRM_DEBUG("BEGIN_LP_RING(%d)\n", \
(n)); \
+ I915_RING_DO_VALIDATE(dev); \
if (dev_priv->ring.space < (n)*4) \
i915_wait_ring(dev, (n)*4, __FUNCTION__); \
outcount = 0; \
@@ -419,6 +670,7 @@ typedef boolean_t bool;
#define ADVANCE_LP_RING() do { \
if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
+ I915_RING_DO_VALIDATE(dev); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I915_WRITE(PRB0_TAIL, outring); \
@@ -426,6 +678,39 @@ typedef boolean_t bool;
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 4: ring 0 head pointer
+ * 5: ring 1 head pointer (915-class)
+ * 6: ring 2 head pointer (915-class)
+ *
+ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define I915_GEM_HWS_INDEX 0x10
+
+/* MCH MMIO space */
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC 0x200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define CHDECMISC 0x111
+#define CHDECMISC_FLEXMEMORY (1 << 1)
+
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
@@ -526,33 +811,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
-#define BREADCRUMB_BITS 31
-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
-
-#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 4: ring 0 head pointer
- * 5: ring 1 head pointer (915-class)
- * 6: ring 2 head pointer (915-class)
- *
- * The area from dword 0x10 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
-#define I915_GEM_HWS_INDEX 0x10
-
/*
* 3D instructions used by the kernel
*/
@@ -575,6 +840,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
@@ -618,7 +884,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
+#define HWS_ADDRESS_MASK 0xfffff000
+#define HWS_START_ADDRES_SHIFT 4
#define IPEIR 0x02088
#define NOPID 0x02094
#define HWSTAM 0x02098
@@ -648,6 +917,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define EMR 0x020b4
#define ESR 0x020b8
#define INSTPM 0x020c0
+#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define MI_ARB_STATE 0x020e4 /* 915+ only */
@@ -697,7 +967,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/*
* GPIO regs
*/
-
#define GPIOA 0x5010
#define GPIOB 0x5014
#define GPIOC 0x5018
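
The DCC and CHDECMISC definitions above feed bit-6 swizzle detection: with dual-channel interleaved DRAM, the memory controller XORs address bit 6 with one or more higher bits, and CPU access through a mapping has to apply the same XOR to see the bytes the GPU wrote. As an illustration of what the swizzle modes mean (not driver code):

#include <stdint.h>
#include "i915_drm.h"

/* Apply a detected swizzle mode to a linear byte offset. */
static uint32_t swizzle_offset(uint32_t offset, uint32_t swizzle_mode)
{
	uint32_t bit9  = (offset >> 9)  & 1;
	uint32_t bit10 = (offset >> 10) & 1;
	uint32_t bit11 = (offset >> 11) & 1;

	switch (swizzle_mode) {
	case I915_BIT_6_SWIZZLE_9:
		return offset ^ (bit9 << 6);
	case I915_BIT_6_SWIZZLE_9_10:
		return offset ^ ((bit9 ^ bit10) << 6);
	case I915_BIT_6_SWIZZLE_9_11:
		return offset ^ ((bit9 ^ bit11) << 6);
	case I915_BIT_6_SWIZZLE_9_10_11:
		return offset ^ ((bit9 ^ bit10 ^ bit11) << 6);
	default:	/* I915_BIT_6_SWIZZLE_NONE or UNKNOWN */
		return offset;
	}
}
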
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 135d6159..fe617557 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -33,6 +33,33 @@
#define MAX_NOPID ((u32)~0)
+/*
+ * These are the interrupts used by the driver
+ */
+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
+static inline void
+i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != 0) {
+ dev_priv->irq_mask_reg &= ~mask;
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IMR);
+ }
+}
+
+static inline void
+i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != mask) {
+ dev_priv->irq_mask_reg |= mask;
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IMR);
+ }
+}
+
/**
* i915_get_pipe - return the pipe associated with a given plane
* @dev: DRM device
@@ -403,12 +430,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 iir;
- u32 pipea_stats, pipeb_stats;
+ u32 pipea_stats = 0, pipeb_stats = 0;
int vblank = 0;
+ if (dev->pdev->msi_enabled)
+ I915_WRITE(IMR, ~0);
iir = I915_READ(IIR);
- if (iir == 0)
+#if 0
+ DRM_DEBUG("flag=%08x\n", iir);
+#endif
+ atomic_inc(&dev_priv->irq_received);
+ if (iir == 0) {
+ if (dev->pdev->msi_enabled) {
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IMR);
+ }
return IRQ_NONE;
+ }
/*
* Clear the PIPE(A|B)STAT regs before the IIR otherwise
@@ -422,7 +460,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
vblank++;
drm_handle_vblank(dev, i915_get_plane(dev, 0));
}
-
I915_WRITE(PIPEASTAT, pipea_stats);
}
if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
@@ -458,9 +495,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
I915_WRITE(IIR, iir);
- (void) I915_READ(IIR);
+ if (dev->pdev->msi_enabled)
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT) {
+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
i915_fence_handler(dev);
@@ -497,35 +537,40 @@ int i915_emit_irq(struct drm_device *dev)
void i915_user_irq_on(drm_i915_private_t *dev_priv)
{
DRM_SPINLOCK(&dev_priv->user_irq_lock);
- if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
- I915_WRITE(IER, dev_priv->irq_enable_reg);
- }
+ if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1))
+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
-
}
void i915_user_irq_off(drm_i915_private_t *dev_priv)
{
DRM_SPINLOCK(&dev_priv->user_irq_lock);
- if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- // dev_priv->irq_enable_reg &= ~I915_USER_INTERRUPT;
- // I915_WRITE(IER, dev_priv->irq_enable_reg);
- }
+ BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
+ if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = 0;
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
- if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
return 0;
+ }
i915_user_irq_on(dev_priv);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
@@ -590,16 +635,17 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
@@ -625,7 +671,9 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
}
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ i915_enable_irq(dev_priv, mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
return 0;
}
@@ -635,16 +683,17 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
@@ -652,7 +701,9 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
break;
}
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ i915_disable_irq(dev_priv, mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
if (pipestat_reg)
{
@@ -665,20 +716,23 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
+ (void) I915_READ(pipestat_reg);
}
}
void i915_enable_interrupt (struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
+
+ dev_priv->irq_mask_reg = ~0;
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+ (void) I915_READ (IER);
#ifdef __linux__
opregion_enable_asle(dev);
#endif
- I915_WRITE(IER, dev_priv->irq_enable_reg);
dev_priv->irq_enabled = 1;
}
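
Two idioms recur in the reworked interrupt code above: the mask now lives in a cached IMR copy edited under user_irq_lock, and every IMR write is followed by a read of the same register. The read is not decorative; on write-posted buses it flushes the write to the device so the new mask is in effect before execution continues. Reduced to a sketch (illustrative helper, not part of this commit):

static inline void i915_write_posted(drm_i915_private_t *dev_priv,
				     uint32_t reg, uint32_t val)
{
	I915_WRITE(reg, val);
	(void) I915_READ(reg);	/* flush the posted write */
}
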
diff --git a/tests/Makefile.am b/tests/Makefile.am
index dce1754e..718cc436 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -22,7 +22,10 @@ TESTS = auth \
getstats \
lock \
setversion \
- updatedraw
+ updatedraw \
+ gem_basic \
+ gem_readwrite \
+ gem_mmap
EXTRA_PROGRAMS = $(TESTS)
CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES)
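
With automake, the programs named in TESTS are built on demand and run by "make check"; listing them in EXTRA_PROGRAMS keeps them out of the default build and install.
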
diff --git a/tests/drmtest.c b/tests/drmtest.c
index cae99a0c..5453b105 100644
--- a/tests/drmtest.c
+++ b/tests/drmtest.c
@@ -26,6 +26,7 @@
*/
#include <fcntl.h>
+#include <sys/stat.h>
#include "drmtest.h"
/** Open the first DRM device we can find, searching up to 16 device nodes */
@@ -80,4 +81,3 @@ int drm_open_any_master(void)
fprintf(stderr, "Couldn't find an un-controlled DRM device\n");
abort();
}
-
diff --git a/tests/gem_basic.c b/tests/gem_basic.c
new file mode 100644
index 00000000..b2176fba
--- /dev/null
+++ b/tests/gem_basic.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include "drm.h"
+#include "i915_drm.h"
+#include "drmtest.h"
+
+static void
+test_bad_close(int fd)
+{
+ struct drm_gem_close close;
+ int ret;
+
+ printf("Testing error return on bad close ioctl.\n");
+
+ close.handle = 0x10101010;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
+
+ assert(ret == -1 && errno == EINVAL);
+}
+
+static void
+test_create_close(int fd)
+{
+ struct drm_i915_gem_create create;
+ struct drm_gem_close close;
+ int ret;
+
+ printf("Testing creating and closing an object.\n");
+
+ memset(&create, 0, sizeof(create));
+ create.size = 16 * 1024;
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ assert(ret == 0);
+
+ close.handle = create.handle;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
+ assert(ret == 0);
+}
+
+static void
+test_create_fd_close(int fd)
+{
+ struct drm_i915_gem_create create;
+ int ret;
+
+ printf("Testing closing with an object allocated.\n");
+
+ memset(&create, 0, sizeof(create));
+ create.size = 16 * 1024;
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ assert(ret == 0);
+
+ close(fd);
+}
+
+int main(int argc, char **argv)
+{
+ int fd;
+
+ fd = drm_open_any();
+
+ test_bad_close(fd);
+ test_create_close(fd);
+ test_create_fd_close(fd);
+
+ return 0;
+}
diff --git a/tests/gem_mmap.c b/tests/gem_mmap.c
new file mode 100644
index 00000000..b5c15463
--- /dev/null
+++ b/tests/gem_mmap.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include "drm.h"
+#include "i915_drm.h"
+#include "drmtest.h"
+
+#define OBJECT_SIZE 16384
+
+int do_read(int fd, int handle, void *buf, int offset, int size)
+{
+ struct drm_i915_gem_pread read;
+
+ /* Ensure that we don't have any convenient data in buf in case
+ * we fail.
+ */
+ memset(buf, 0xd0, size);
+
+ memset(&read, 0, sizeof(read));
+ read.handle = handle;
+ read.data_ptr = (uintptr_t)buf;
+ read.size = size;
+ read.offset = offset;
+
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
+}
+
+int do_write(int fd, int handle, void *buf, int offset, int size)
+{
+ struct drm_i915_gem_pwrite write;
+
+ memset(&write, 0, sizeof(write));
+ write.handle = handle;
+ write.data_ptr = (uintptr_t)buf;
+ write.size = size;
+ write.offset = offset;
+
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
+}
+
+int main(int argc, char **argv)
+{
+ int fd;
+ struct drm_i915_gem_create create;
+ struct drm_i915_gem_mmap mmap;
+ struct drm_gem_close unref;
+ uint8_t expected[OBJECT_SIZE];
+ uint8_t buf[OBJECT_SIZE];
+ uint8_t *addr;
+ int ret;
+ int handle;
+
+ fd = drm_open_any();
+
+ memset(&mmap, 0, sizeof(mmap));
+ mmap.handle = 0x10101010;
+ mmap.offset = 0;
+ mmap.size = 4096;
+ printf("Testing mmaping of bad object.\n");
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
+ assert(ret == -1 && errno == EBADF);
+
+ memset(&create, 0, sizeof(create));
+ create.size = OBJECT_SIZE;
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ assert(ret == 0);
+ handle = create.handle;
+
+ printf("Testing mmaping of newly created object.\n");
+ mmap.handle = handle;
+ mmap.offset = 0;
+ mmap.size = OBJECT_SIZE;
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
+ assert(ret == 0);
+ addr = (uint8_t *)(uintptr_t)mmap.addr_ptr;
+
+ printf("Testing contents of newly created object.\n");
+ memset(expected, 0, sizeof(expected));
+ assert(memcmp(addr, expected, sizeof(expected)) == 0);
+
+ printf("Testing coherency of writes and mmap reads.\n");
+ memset(buf, 0, sizeof(buf));
+ memset(buf + 1024, 0x01, 1024);
+ memset(expected + 1024, 0x01, 1024);
+ ret = do_write(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ assert(memcmp(buf, addr, sizeof(buf)) == 0);
+
+ printf("Testing that mapping stays after close\n");
+ unref.handle = handle;
+ ret = ioctl(fd, DRM_IOCTL_GEM_CLOSE, &unref);
+ assert(ret == 0);
+ assert(memcmp(buf, addr, sizeof(buf)) == 0);
+
+ printf("Testing unmapping\n");
+ munmap(addr, OBJECT_SIZE);
+
+ close(fd);
+
+ return 0;
+}
diff --git a/tests/gem_readwrite.c b/tests/gem_readwrite.c
new file mode 100644
index 00000000..bd1d232b
--- /dev/null
+++ b/tests/gem_readwrite.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include "drm.h"
+#include "i915_drm.h"
+#include "drmtest.h"
+
+#define OBJECT_SIZE 16384
+
+int do_read(int fd, int handle, void *buf, int offset, int size)
+{
+ struct drm_i915_gem_pread read;
+
+ /* Ensure that we don't have any convenient data in buf in case
+ * we fail.
+ */
+ memset(buf, 0xd0, size);
+
+ memset(&read, 0, sizeof(read));
+ read.handle = handle;
+ read.data_ptr = (uintptr_t)buf;
+ read.size = size;
+ read.offset = offset;
+
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &read);
+}
+
+int do_write(int fd, int handle, void *buf, int offset, int size)
+{
+ struct drm_i915_gem_pwrite write;
+
+ memset(&write, 0, sizeof(write));
+ write.handle = handle;
+ write.data_ptr = (uintptr_t)buf;
+ write.size = size;
+ write.offset = offset;
+
+ return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &write);
+}
+
+int main(int argc, char **argv)
+{
+ int fd;
+ struct drm_i915_gem_create create;
+ uint8_t expected[OBJECT_SIZE];
+ uint8_t buf[OBJECT_SIZE];
+ int ret;
+ int handle;
+
+ fd = drm_open_any();
+
+ memset(&create, 0, sizeof(create));
+ create.size = OBJECT_SIZE;
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ assert(ret == 0);
+ handle = create.handle;
+
+ printf("Testing contents of newly created object.\n");
+ ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ memset(expected, 0, sizeof(expected));
+ assert(memcmp(expected, buf, sizeof(expected)) == 0);
+
+ printf("Testing read beyond end of buffer.\n");
+ ret = do_read(fd, handle, buf, OBJECT_SIZE / 2, OBJECT_SIZE);
+ printf("%d %d\n", ret, errno);
+ assert(ret == -1 && errno == EINVAL);
+
+ printf("Testing full write of buffer\n");
+ memset(buf, 0, sizeof(buf));
+ memset(buf + 1024, 0x01, 1024);
+ memset(expected + 1024, 0x01, 1024);
+ ret = do_write(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ assert(memcmp(buf, expected, sizeof(buf)) == 0);
+
+ printf("Testing partial write of buffer\n");
+ memset(buf + 4096, 0x02, 1024);
+ memset(expected + 4096, 0x02, 1024);
+ ret = do_write(fd, handle, buf + 4096, 4096, 1024);
+ assert(ret == 0);
+ ret = do_read(fd, handle, buf, 0, OBJECT_SIZE);
+ assert(ret == 0);
+ assert(memcmp(buf, expected, sizeof(buf)) == 0);
+
+ printf("Testing partial read of buffer\n");
+ ret = do_read(fd, handle, buf, 512, 1024);
+ assert(ret == 0);
+ assert(memcmp(buf, expected + 512, 1024) == 0);
+
+ printf("Testing read of bad buffer handle\n");
+ ret = do_read(fd, 1234, buf, 0, 1024);
+ assert(ret == -1 && errno == EBADF);
+
+ printf("Testing write of bad buffer handle\n");
+ ret = do_write(fd, 1234, buf, 0, 1024);
+ assert(ret == -1 && errno == EBADF);
+
+ close(fd);
+
+ return 0;
+}