author     Eric Anholt <eric@anholt.net>    2008-08-08 14:05:01 -0700
committer  Eric Anholt <eric@anholt.net>    2008-08-08 14:05:01 -0700
commit     e1b8e79796b172c356af98eb49107c8abbebfe5a (patch)
tree       4af674ca495ca93535f057ea4c0385c57825c2e4 /linux-core
parent     0c47151a571827905c34649208e22f8ec0175d62 (diff)
parent     46e9274e8538e5b0517f611dca99dde611f4e95d (diff)
Merge branch 'drm-gem'

Conflicts:
	shared-core/i915_dma.c

This brings in kernel support and userland interface for intel GEM.
Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/Makefile               6
-rw-r--r--  linux-core/Makefile.kernel        5
-rw-r--r--  linux-core/drm-gem.txt          805
-rw-r--r--  linux-core/drmP.h               159
-rw-r--r--  linux-core/drm_agpsupport.c      43
-rw-r--r--  linux-core/drm_compat.h           2
-rw-r--r--  linux-core/drm_drv.c              6
-rw-r--r--  linux-core/drm_fops.c             6
-rw-r--r--  linux-core/drm_gem.c            420
-rw-r--r--  linux-core/drm_irq.c             21
-rw-r--r--  linux-core/drm_lock.c            21
-rw-r--r--  linux-core/drm_memory.c           2
-rw-r--r--  linux-core/drm_mm.c               2
-rw-r--r--  linux-core/drm_objects.h          7
-rw-r--r--  linux-core/drm_proc.c            84
-rw-r--r--  linux-core/drm_stub.c            31
-rw-r--r--  linux-core/i915_drv.c            35
-rw-r--r--  linux-core/i915_gem.c          2502
-rw-r--r--  linux-core/i915_gem_debug.c     202
-rw-r--r--  linux-core/i915_gem_proc.c      293
-rw-r--r--  linux-core/i915_gem_tiling.c    309
-rw-r--r--  linux-core/i915_opregion.c       19
22 files changed, 4932 insertions, 48 deletions
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 3af6f370..55dfb77c 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -116,7 +116,7 @@ V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \
ifeq ($(V),"$(RUNNING_REL)")
HEADERFROMBOOT := 1
-GETCONFIG := MAKEFILES=$(shell pwd)/.config
+GETCONFIG := MAKEFILES=$(shell /bin/pwd)/.config
HAVECONFIG := y
endif
@@ -163,7 +163,7 @@ endif
all: modules
modules: includes
- +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+ +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules
ifeq ($(HEADERFROMBOOT),1)
@@ -239,7 +239,7 @@ drmstat: drmstat.c
$(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
install:
- make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install
+ make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules_install
else
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 45a6b1f9..e5af2ec4 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -14,14 +14,15 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
- drm_regman.o drm_vm_nopage_compat.o
+ drm_regman.o drm_vm_nopage_compat.o drm_gem.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
- i915_opregion.o
+ i915_opregion.o \
+ i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
diff --git a/linux-core/drm-gem.txt b/linux-core/drm-gem.txt
new file mode 100644
index 00000000..5cda87f8
--- /dev/null
+++ b/linux-core/drm-gem.txt
@@ -0,0 +1,805 @@
+ The Graphics Execution Manager
+ Part of the Direct Rendering Manager
+ ==============================
+
+ Keith Packard <keithp@keithp.com>
+ Eric Anholt <eric@anholt.net>
+ 2008-5-9
+
+Contents:
+
+ 1. GEM Overview
+ 2. API overview and conventions
+ 3. Object Creation/Destruction
+ 4. Reading/writing contents
+ 5. Mapping objects to userspace
+ 6. Memory Domains
+ 7. Execution (Intel specific)
+ 8. Other misc Intel-specific functions
+
+1. Graphics Execution Manager Overview
+
+Gem is designed to manage graphics memory, control access to the graphics
+device execution context and handle the essentially NUMA environment unique
+to modern graphics hardware. Gem allows multiple applications to share
+graphics device resources without the need to constantly reload the entire
+graphics card. Data may be shared between multiple applications with gem
+ensuring that the correct memory synchronization occurs.
+
+Graphics data can consume arbitrary amounts of memory, with 3D applications
+constructing ever larger sets of textures and vertices. With graphics card
+memory growing larger every year, and graphics APIs growing more
+complex, we can no longer insist that each application save a complete copy
+of its graphics state so that the card can be re-initialized from user
+space at each context switch. Ensuring that graphics data remains persistent
+across context switches allows applications significant new functionality
+while also improving performance for existing APIs.
+
+Modern Linux desktops include significant 3D rendering as a fundamental
+component of the desktop image construction process. 2D and 3D applications
+paint their content to offscreen storage and the central 'compositing
+manager' constructs the final screen image from those window contents. This
+means that pixel image data from these applications must move within reach
+of the compositing manager and be used as source operands for screen image
+rendering operations.
+
+Gem provides simple mechanisms to manage graphics data and control execution
+flow within the Linux operating system. Using many existing kernel
+subsystems, it does this with a modest amount of code.
+
+2. API Overview and Conventions
+
+All APIs here are defined in terms of ioctls applied to the DRM file
+descriptor. To create and manipulate objects, an application must be
+'authorized' using the DRI or DRI2 protocols with the X server. To relax
+that, we will need to implement some better access control mechanisms within
+the hardware portion of the driver to prevent inappropriate
+cross-application data access.
+
+Any DRM driver which does not support GEM will return -ENODEV for all of
+these ioctls. Invalid object handles return -EINVAL. Invalid object names
+return -ENOENT. Other errors are as documented in the specific API below.
+
+To avoid the need to translate ioctl contents on mixed-size systems (with
+32-bit user space running on a 64-bit kernel), the ioctl data structures
+contain explicitly sized objects, using 64-bits for all size and pointer
+data and 32-bits for identifiers. In addition, the 64-bit objects are all
+carefully aligned on 64-bit boundaries. Because of this, all pointers in the
+ioctl data structures are passed as uint64_t values. Suitable casts will
+be necessary.
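+
+ As a purely illustrative sketch (not part of the API itself), a
+ user-space pointer is typically packed into and recovered from one of
+ these uint64_t fields like this, where 'args' stands for any of the
+ ioctl structures described later and 'buf' is an ordinary pointer:
+
+ /* illustrative sketch only */
+ args.data_ptr = (uint64_t) (uintptr_t) buf;
+ ...
+ buf = (void *) (uintptr_t) args.data_ptr;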
+
+One significant operation which is explicitly left out of this API is object
+locking. Applications are expected to perform locking of shared objects
+outside of the GEM API. This kind of locking is not necessary to safely
+manipulate the graphics engine, and with multiple objects interacting in
+unknown ways, per-object locking would likely introduce all kinds of
+lock-order issues. Punting this to the application seems like the only
+sensible plan. Given that DRM already offers a global lock on the hardware,
+this doesn't change the current situation.
+
+3. Object Creation and Destruction
+
+Gem provides explicit memory management primitives. System pages are
+allocated when the object is created, either as the fundamental storage for
+hardware where system memory is used by the graphics processor directly, or
+as backing store for graphics-processor resident memory.
+
+Objects are referenced from user space using handles. These are, for all
+intents and purposes, equivalent to file descriptors. We could simply use
+file descriptors were it not for the small limit (1024) of file descriptors
+available to applications, and for the fact that the X server (a rather
+significant user of this API) uses 'select' and has a limited maximum file
+descriptor for that operation. Given the ability to allocate more file
+descriptors, and given the ability to place these 'higher' in the file
+descriptor space, we'd love to simply use file descriptors.
+
+Objects may be published with a name so that other applications can access
+them. The name remains valid as long as the object exists. Right now, our
+DRI APIs use 32-bit integer names, so that's what we expose here
+
+ A. Creation
+
+ struct drm_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object
+ * will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+ /* usage */
+ create.size = 16384;
+ ret = ioctl (fd, DRM_IOCTL_GEM_CREATE, &create);
+ if (ret == 0)
+ return create.handle;
+
+ Note that the size is rounded up to a page boundary, and that
+ the rounded-up size is returned in 'size'. No name is assigned to
+ this object, making it local to this process.
+
+ If insufficient memory is available, -ENOMEM will be returned.
+
+ B. Closing
+
+ struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+
+ /* usage */
+ close.handle = <handle>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_CLOSE, &close);
+
+ This call makes the specified handle invalid, and if no other
+ applications are using the object, any necessary graphics hardware
+ synchronization is performed and the resources used by the object
+ are released.
+
+ C. Naming
+
+ struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+
+ /** Returned global name */
+ uint32_t name;
+ };
+
+ /* usage */
+ flink.handle = <handle>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (ret == 0)
+ return flink.name;
+
+ Flink creates a name for the object and returns it to the
+ application. This name can be used by other applications to gain
+ access to the same object.
+
+ D. Opening by name
+
+ struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+
+ /** Returned handle for the object */
+ uint32_t handle;
+
+ /** Returned size of the object */
+ uint64_t size;
+ };
+
+ /* usage */
+ open.name = <name>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_OPEN, &open);
+ if (ret == 0) {
+ *sizep = open.size;
+ return open.handle;
+ }
+
+ Open accesses an existing object and returns a handle for it. If the
+ object doesn't exist, -ENOENT is returned. The size of the object is
+ also returned. This handle has all the same capabilities as the
+ handle used to create the object. In particular, the object is not
+ destroyed until all handles are closed.
+
+4. Basic read/write operations
+
+By default, gem objects are not mapped to the application's address space;
+getting data in and out of them is done with I/O operations instead. This
+allows the data to reside in otherwise unmapped pages, including pages in
+video memory on an attached discrete graphics card. In addition, using
+explicit I/O operations allows better control over cache contents: because
+graphics devices are generally not cache coherent with the CPU, mapping
+pages used for graphics into an application address space requires the use
+of expensive cache flushing operations. Providing direct control over
+graphics data access ensures that data are handled in the most efficient
+possible fashion.
+
+ A. Reading
+
+ struct drm_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void * */
+ };
+
+ This copies data out of the specified object, starting at the
+ specified offset, into user memory. Any necessary graphics device
+ synchronization and flushing will be done automatically.
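+
+ A usage sketch, assuming 'buf' points to at least <size> bytes of
+ user memory; the ioctl name shown simply follows the naming pattern
+ of the other GEM ioctls in this document and is an assumption:
+
+ /* usage (sketch) */
+ pread.handle = <handle>;
+ pread.offset = 0;
+ pread.size = <size>;
+ pread.data_ptr = (uint64_t) (uintptr_t) buf;
+ ret = ioctl (fd, DRM_IOCTL_GEM_PREAD, &pread);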
+
+ struct drm_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void * */
+ };
+
+ This copies data from user memory into the specified object at
+ the specified offset. Again, device synchronization will
+ be handled by the kernel to ensure user space sees a
+ consistent view of the graphics device.
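+
+ A matching sketch for writes, under the same naming assumption:
+
+ /* usage (sketch) */
+ pwrite.handle = <handle>;
+ pwrite.offset = 0;
+ pwrite.size = <size>;
+ pwrite.data_ptr = (uint64_t) (uintptr_t) buf;
+ ret = ioctl (fd, DRM_IOCTL_GEM_PWRITE, &pwrite);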
+
+5. Mapping objects to user space
+
+For most objects, reading/writing is the preferred interaction mode.
+However, when the CPU is involved in rendering to cover deficiencies in
+hardware support for particular operations, it will want to directly
+access the relevant objects.
+
+Because mmap is fairly heavyweight, we allow applications to retain maps to
+objects persistently and then update how they're using the memory through a
+separate interface. Applications which fail to use this separate interface
+may exhibit unpredictable behaviour as memory consistency will not be
+preserved.
+
+ A. Mapping
+
+ struct drm_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /** Returned pointer the data was mapped at */
+ uint64_t addr_ptr; /* void * */
+ };
+
+ /* usage */
+ mmap.handle = <handle>;
+ mmap.offset = <offset>;
+ mmap.size = <size>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_MMAP, &mmap);
+ if (ret == 0)
+ return (void *) (uintptr_t) mmap.addr_ptr;
+
+
+ B. Unmapping
+
+ munmap (addr, length);
+
+ Nothing strange here, just use the normal munmap syscall.
+
+6. Memory Domains
+
+Graphics devices remain a strong bastion of non cache-coherent memory. As a
+result, accessing data through one functional unit will end up loading that
+unit's cache with data which then needs to be manually synchronized when
+that data is used with another functional unit.
+
+Tracking where data are resident is done by identifying how functional units
+deal with caches. Each cache is labeled as a separate memory domain. Then,
+each sequence of operations is expected to load data into various read
+domains and leave data in at most one write domain. Gem tracks the read and
+write memory domains of each object and performs the necessary
+synchronization operations when objects move from one domain set to another.
+
+For example, if operation 'A' constructs an image that is immediately used
+by operation 'B', and the read domain for 'B' is not the same as the
+write domain for 'A', then the write domain must be flushed and the read
+domain invalidated. If these two operations are both executed in the same
+command queue, then the flush operation can go in between them in the same
+queue, avoiding any kind of CPU-based synchronization and leaving the GPU to
+do the work itself.
+
+6.1 Memory Domains (GPU-independent)
+
+ * DRM_GEM_DOMAIN_CPU.
+
+ Objects in this domain are using caches which are connected to the CPU.
+ Moving objects from non-CPU domains into the CPU domain can involve waiting
+ for the GPU to finish with operations using this object. Moving objects
+ from this domain to a GPU domain can involve flushing CPU caches and chipset
+ buffers.
+
+6.2 GPU-independent memory domain ioctl
+
+This ioctl is independent of the GPU in use. So far, no use other than
+synchronizing objects to the CPU domain has been found; if that turns out
+to be generally true, this ioctl may be simplified further.
+
+ A. Explicit domain control
+
+ struct drm_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+ };
+
+ /* usage */
+ set_domain.handle = <handle>;
+ set_domain.read_domains = <read_domains>;
+ set_domain.write_domain = <write_domain>;
+ ret = ioctl (fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain);
+
+ When the application wants to explicitly manage memory domains for
+ an object, it can use this function. Usually, this is only used
+ when the application wants to synchronize object contents between
+ the GPU and CPU-based application rendering. In that case,
+ the <read_domains> would be set to DRM_GEM_DOMAIN_CPU, and if the
+ application were going to write to the object, the <write_domain>
+ would also be set to DRM_GEM_DOMAIN_CPU. After the call, gem
+ guarantees that all previous rendering operations involving this
+ object are complete. The application is then free to access the
+ object through the address returned by the mmap call. Afterwards,
+ when the application again uses the object through the GPU, any
+ necessary CPU flushing will occur and the object will be correctly
+ synchronized with the GPU.
+
+ Note that this synchronization is not required for any accesses
+ going through the driver itself. The pread, pwrite and execbuffer
+ ioctls all perform the necessary domain management internally.
+ Explicit synchronization is only necessary when accessing the object
+ through the mmap'd address.
+
+7. Execution (Intel specific)
+
+Managing the command buffers is inherently chip-specific, so the core of gem
+doesn't have any intrinsic functions. Rather, execution is left to the
+device-specific portions of the driver.
+
+The Intel DRM_I915_GEM_EXECBUFFER ioctl takes a list of gem objects, all of
+which are mapped to the graphics device. The last object in the list is the
+command buffer.
+
+7.1. Relocations
+
+Command buffers often refer to other objects, and to allow the kernel driver
+to move objects around, a sequence of relocations is associated with each
+object. Device-specific relocation operations are used to place the
+target-object relative value into the object.
+
+The Intel driver has a single relocation type:
+
+ struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this
+ * relocation entry.
+ *
+ * It's appealing to make this be an index into the
+ * mm_validate_entry list to refer to the buffer,
+ * but this allows the driver to create a relocation
+ * list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target
+ * buffer to make up the relocation entry.
+ */
+ uint32_t delta;
+
+ /**
+ * Offset in the buffer the relocation entry will be
+ * written into
+ */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the
+ * relocation entry was last written as.
+ *
+ * If the buffer has the same offset as last time, we
+ * can skip syncing and writing the relocation. This
+ * value is written back out by the execbuffer ioctl
+ * when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+ /*
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the
+ * whole execbuffer operation, so that where there are
+ * conflicts, the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+ };
+
+ 'target_handle' is the handle of the target object. This object must
+ be one of the objects listed in the execbuffer request or
+ bad things will happen. The kernel doesn't check for this.
+
+ 'offset' is where, in the source object, the relocation data
+ are written. Each relocation value is a 32-bit value consisting
+ of the location of the target object in the GPU memory space plus
+ the 'delta' value included in the relocation.
+
+ 'presumed_offset' is where user-space believes the target object
+ lies in GPU memory space. If this value matches where the object
+ actually is, then no relocation data are written; the kernel
+ assumes that user space has set up data in the source object
+ using this presumption. This offers a fairly important optimization,
+ as writing relocation data requires mapping the source object
+ into the kernel memory space.
+
+ 'read_domains' and 'write_domain' list the usage by the source
+ object of the target object. The kernel unions all of the domain
+ information from all relocations in the execbuffer request. No more
+ than one write_domain is allowed; otherwise, an EINVAL error is
+ returned. read_domains must contain write_domain. This domain
+ information is used to synchronize buffer contents as described
+ above in the section on domains.
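+
+ As a sketch, a relocation entry pointing sampler state at a texture
+ object might be filled in like this (the offsets and the choice of
+ read domain are illustrative only):
+
+ /* sketch */
+ reloc.target_handle = <texture handle>;
+ reloc.delta = 0;
+ reloc.offset = <byte offset of the pointer within the source object>;
+ reloc.presumed_offset = <last offset reported for the texture>;
+ reloc.read_domains = DRM_GEM_DOMAIN_I915_SAMPLER;
+ reloc.write_domain = 0;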
+
+7.1.1 Memory Domains (Intel specific)
+
+The Intel GPU has several internal caches which are not coherent and hence
+require explicit synchronization. Memory domains provide the necessary data
+to synchronize what is needed while leaving other cache contents intact.
+
+ * DRM_GEM_DOMAIN_I915_RENDER.
+ The GPU 3D and 2D rendering operations use a unified rendering cache, so
+ operations doing 3D painting and 2D blits will use this domain.
+
+ * DRM_GEM_DOMAIN_I915_SAMPLER
+ Textures are loaded by the sampler through a separate cache, so
+ any texture reading will use this domain. Note that the sampler
+ and renderer use different caches, so moving an object from render target
+ to texture source will require a domain transfer.
+
+ * DRM_GEM_DOMAIN_I915_COMMAND
+ The command buffer doesn't have an explicit cache (although it does
+ read ahead quite a bit), so this domain just indicates that the object
+ needs to be flushed to the GPU.
+
+ * DRM_GEM_DOMAIN_I915_INSTRUCTION
+ All of the programs on Gen4 and later chips use an instruction cache to
+ speed program execution. It must be explicitly flushed when new programs
+ are written to memory by the CPU.
+
+ * DRM_GEM_DOMAIN_I915_VERTEX
+ Vertex data uses two different vertex caches, but they're
+ both flushed with the same instruction.
+
+7.2 Execution object list (Intel specific)
+
+ struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT
+ * for this operation.
+ */
+ uint32_t handle;
+
+ /**
+ * List of relocations to be performed on this buffer
+ */
+ uint32_t relocation_count;
+ /* struct drm_i915_gem_relocation_entry *relocs */
+ uint64_t relocs_ptr;
+
+ /**
+ * Required alignment in graphics aperture
+ */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object,
+ * for future presumed_offset writes.
+ */
+ uint64_t offset;
+ };
+
+ Each object involved in a particular execution operation must be
+ listed using one of these structures.
+
+ 'handle' references the object.
+
+ 'relocs_ptr' is a user-mode pointer to an array of 'relocation_count'
+ drm_i915_gem_relocation_entry structs (see above) that
+ define the relocations necessary in this buffer. Note that all
+ relocations must reference other exec_object structures in the same
+ execbuffer ioctl and that those other buffers must come earlier in
+ the exec_object array. In other words, the dependencies mapped by the
+ exec_object relocations must form a directed acyclic graph.
+
+ 'alignment' is the byte alignment necessary for this buffer. Each
+ object has specific alignment requirements; as the kernel doesn't
+ know what each object is being used for, those requirements must be
+ provided by user mode. If an object is used in two different ways,
+ it's quite possible that the alignment requirements will differ.
+
+ 'offset' is a return value, receiving the location of the object
+ during this execbuffer operation. The application should use this
+ as the presumed offset in future operations; if the object does not
+ move, then the kernel need not write relocation data.
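+
+ A sketch of filling in one entry; the alignment value and the
+ 'relocs' array are illustrative:
+
+ /* sketch */
+ exec_object.handle = <handle>;
+ exec_object.relocation_count = <number of entries in relocs>;
+ exec_object.relocs_ptr = (uint64_t) (uintptr_t) relocs;
+ exec_object.alignment = 4096;
+ /* exec_object.offset is written back by the kernel */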
+
+7.3 Execbuffer ioctl (Intel specific)
+
+ struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their
+ * relocations to be performed on them.
+ *
+ * These buffers must be listed in an order such that
+ * all relocations a buffer is performing refer to
+ * buffers that have already appeared in the validate
+ * list.
+ */
+ /* struct drm_i915_gem_validate_entry *buffers */
+ uint64_t buffers_ptr;
+ uint32_t buffer_count;
+
+ /**
+ * Offset in the batchbuffer to start execution from.
+ */
+ uint32_t batch_start_offset;
+
+ /**
+ * Bytes used in batchbuffer from batch_start_offset
+ */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+ };
+
+
+ 'buffers_ptr' is a user-mode pointer to an array of 'buffer_count'
+ drm_i915_gem_exec_object structures which contains the complete set
+ of objects required for this execbuffer operation. The last entry in
+ this array, the 'batch buffer', is the buffer of commands which will
+ be linked to the ring and executed.
+
+ 'batch_start_offset' is the byte offset within the batch buffer which
+ contains the first command to execute. So far, we haven't found a
+ reason to use anything other than '0' here, but the thought was that
+ some space might be allocated for additional initialization which
+ could be skipped in some cases. This must be a multiple of 4.
+
+ 'batch_len' is the length, in bytes, of the data to be executed
+ (i.e., the amount of data after batch_start_offset). This must
+ be a multiple of 4.
+
+ 'num_cliprects' and 'cliprects_ptr' reference an array of
+ drm_clip_rect structures that is num_cliprects long. The entire
+ batch buffer will be executed multiple times, once for each
+ rectangle in this list. If num_cliprects is 0, then no clipping
+ rectangle will be set.
+
+ 'DR1' and 'DR4' are portions of the 3DSTATE_DRAWING_RECTANGLE
+ command which will be queued when this operation is clipped
+ (num_cliprects != 0).
+
+ DR1 bit definition
+ 31 Fast Scissor Clip Disable (debug only).
+ Disables a hardware optimization that
+ improves performance. This should have
+ no visible effect, other than reducing
+ performance.
+
+ 30 Depth Buffer Coordinate Offset Disable.
+ This disables the addition of the
+ depth buffer offset bits which are used
+ to change the location of the depth buffer
+ relative to the front buffer.
+
+ 27:26 X Dither Offset. Specifies the X pixel
+ offset to use when accessing the dither table.
+
+ 25:24 Y Dither Offset. Specifies the Y pixel
+ offset to use when accessing the dither
+ table.
+
+ DR4 bit definition
+ 31:16 Drawing Rectangle Origin Y. Specifies the Y
+ origin of coordinates relative to the
+ draw buffer.
+
+ 15:0 Drawing Rectangle Origin X. Specifies the X
+ origin of coordinates relative to the
+ draw buffer.
+
+ As you can see, these two fields are necessary for correctly
+ offsetting drawing within a buffer which contains multiple surfaces.
+ Note that DR1 is only used on Gen3 and earlier hardware and that
+ newer hardware sticks the dither offset elsewhere.
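+
+ A usage sketch for an unclipped submission, assuming 'exec_objects'
+ is the array described in section 7.2 with the batch buffer as its
+ last entry:
+
+ /* usage (sketch) */
+ execbuffer.buffers_ptr = (uint64_t) (uintptr_t) exec_objects;
+ execbuffer.buffer_count = <number of objects, including the batch>;
+ execbuffer.batch_start_offset = 0;
+ execbuffer.batch_len = <bytes of commands in the batch buffer>;
+ execbuffer.DR1 = 0;
+ execbuffer.DR4 = 0;
+ execbuffer.num_cliprects = 0;
+ execbuffer.cliprects_ptr = 0;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuffer);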
+
+7.3.1 Detailed Execution Description
+
+ Execution of a single batch buffer requires several preparatory
+ steps to make the objects visible to the graphics engine and resolve
+ relocations to account for their current addresses.
+
+ A. Mapping and Relocation
+
+ Each exec_object structure in the array is examined in turn.
+
+ If the object is not already bound to the GTT, it is assigned a
+ location in the graphics address space. If no space is available in
+ the GTT, some other object will be evicted. This may require waiting
+ for previous execbuffer requests to complete before that object can
+ be unmapped. With the location assigned, the pages for the object
+ are pinned in memory using find_or_create_page and the GTT entries
+ updated to point at the relevant pages using drm_agp_bind_pages.
+
+ Then the array of relocations is traversed. Each relocation record
+ looks up the target object and, if the presumed offset does not
+ match the current offset (remember that this buffer has already been
+ assigned an address as it must have been mapped earlier), the
+ relocation value is computed using the current offset. If the
+ object is currently in use by the graphics engine, writing the data
+ out must be preceded by a delay while the object is still busy.
+ Once it is idle, the page containing the relocation is mapped
+ by the CPU and the updated relocation data written out.
+
+ The read_domains and write_domain entries in each relocation are
+ used to compute the new read_domains and write_domain values for the
+ target buffers. The actual execution of the domain changes must wait
+ until all of the exec_object entries have been evaluated as the
+ complete set of domain information will not be available until then.
+
+ B. Memory Domain Resolution
+
+ After all of the new memory domain data has been pulled out of the
+ relocations and computed for each object, the list of objects is
+ again traversed and the new memory domains compared against the
+ current memory domains. There are two basic operations involved here:
+
+ * Flushing the current write domain. If the new read domains
+ are not equal to the current write domain, then the current
+ write domain must be flushed. Otherwise, reads will not see data
+ present in the write domain cache. In addition, any new read domains
+ other than the current write domain must be invalidated to ensure
+ that the flushed data are re-read into their caches.
+
+ * Invalidating new read domains. Any domains which were not currently
+ used for this object must be invalidated as old objects which
+ were mapped at the same location may have stale data in the new
+ domain caches.
+
+ If the CPU cache is being invalidated and some GPU cache is being
+ flushed, then we'll have to wait for rendering to complete so that
+ any pending GPU writes will be complete before we flush the GPU
+ cache.
+
+ If the CPU cache is being flushed, then we use 'clflush' to get data
+ written from the CPU.
+
+ Because the GPU caches cannot be partially flushed or invalidated,
+ we don't actually flush them during this traversal stage. Rather, we
+ gather the invalidate and flush bits up in the device structure.
+
+ Once all of the object domain changes have been evaluated, then the
+ gathered invalidate and flush bits are examined. For any GPU flush
+ operations, we emit a single MI_FLUSH command that performs all of
+ the necessary flushes. We then look to see if the CPU cache was
+ flushed. If so, we use the chipset flush magic (writing to a special
+ page) to get the data out of the chipset and into memory.
+
+ C. Queuing Batch Buffer to the Ring
+
+ With all of the objects resident in graphics memory space, and all
+ of the caches prepared with appropriate data, the batch buffer
+ object can be queued to the ring. If there are clip rectangles, then
+ the buffer is queued once per rectangle, with suitable clipping
+ inserted into the ring just before the batch buffer.
+
+ D. Creating an IRQ Cookie
+
+ Right after the batch buffer is placed in the ring, a request to
+ generate an IRQ is added to the ring along with a command to write a
+ marker into memory. When the IRQ fires, the driver can look at the
+ memory location to see how far the GPU has progressed in the ring. This
+ magic cookie value is stored in each object used in this execbuffer
+ command; it is used wherever you saw 'wait for rendering' above in
+ this document.
+
+ E. Writing back the new object offsets
+
+ So that the application has a better idea what to use for
+ 'presumed_offset' values later, the current object offsets are
+ written back to the exec_object structures.
+
+
+8. Other misc Intel-specific functions.
+
+To complete the driver, a few other functions were necessary.
+
+8.1 Initialization from the X server
+
+As the X server is currently responsible for apportioning memory between 2D
+and 3D, it must tell the kernel which region of the GTT aperture is
+available for 3D objects to be mapped into.
+
+ struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the
+ * DRM memory manager.
+ */
+ uint64_t gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM
+ * memory manager.
+ */
+ uint64_t gtt_end;
+ };
+ /* usage */
+ init.gtt_start = <gtt_start>;
+ init.gtt_end = <gtt_end>;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_INIT, &init);
+
+ The GTT aperture between gtt_start and gtt_end will be used to map
+ objects. This also tells the kernel that the ring can be used,
+ pulling the ring addresses from the device registers.
+
+8.2 Pinning objects in the GTT
+
+For scan-out buffers and the current shared depth and back buffers, we need
+to have them always available in the GTT, at least for now. Pinning means to
+lock their pages in memory along with keeping them at a fixed offset in the
+graphics aperture. These operations are available only to root.
+
+ struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ uint32_t handle;
+ uint32_t pad;
+
+ /** alignment required within the aperture */
+ uint64_t alignment;
+
+ /** Returned GTT offset of the buffer. */
+ uint64_t offset;
+ };
+
+ /* usage */
+ pin.handle = <handle>;
+ pin.alignment = <alignment>;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_PIN, &pin);
+ if (ret == 0)
+ return pin.offset;
+
+ Pinning an object ensures that it will not be evicted from the GTT
+ or moved. It will stay resident until destroyed or unpinned.
+
+ struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ uint32_t handle;
+ uint32_t pad;
+ };
+
+ /* usage */
+ unpin.handle = <handle>;
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
+
+ Unpinning an object makes it possible to evict this object from the
+ GTT. It doesn't ensure that it will be evicted, just that it may.
+
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 19168cd7..36a3d898 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -54,6 +54,7 @@
#include <linux/smp_lock.h> /* For (un)lock_kernel */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
+#include <linux/kref.h>
#include <linux/pagemap.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
#include <linux/mutex.h>
@@ -89,6 +90,10 @@
struct drm_device;
struct drm_file;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+typedef unsigned long uintptr_t;
+#endif
+
/* If you want the memory alloc debug functionality, change define below */
/* #define DEBUG_MEMORY */
@@ -107,7 +112,7 @@ struct drm_file;
#define DRIVER_IRQ_SHARED 0x80
#define DRIVER_DMA_QUEUE 0x100
#define DRIVER_FB_DMA 0x200
-
+#define DRIVER_GEM 0x400
/*@}*/
@@ -427,6 +432,11 @@ struct drm_file {
struct list_head refd_objects;
+ /** Mapping of mm object handles to object pointers. */
+ struct idr object_idr;
+ /** Lock for synchronization of access to object_idr. */
+ spinlock_t table_lock;
+
struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
struct file *filp;
void *driver_priv;
@@ -604,6 +614,56 @@ struct drm_ati_pcigart_info {
int table_size;
};
+/**
+ * This structure defines the drm_mm memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
+struct drm_gem_object {
+ /** Reference count of this object */
+ struct kref refcount;
+
+ /** Handle count of this object. Each handle also holds a reference */
+ struct kref handlecount;
+
+ /** Related drm device */
+ struct drm_device *dev;
+
+ /** File representing the shmem storage */
+ struct file *filp;
+
+ /**
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /**
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /**
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ void *driver_private;
+};
+
#include "drm_objects.h"
/**
@@ -705,6 +765,18 @@ struct drm_driver {
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
+ int (*proc_init)(struct drm_minor *minor);
+ void (*proc_cleanup)(struct drm_minor *minor);
+
+ /**
+ * Driver-specific constructor for drm_gem_objects, to set up
+ * obj->driver_private.
+ *
+ * Returns 0 on success.
+ */
+ int (*gem_init_object) (struct drm_gem_object *obj);
+ void (*gem_free_object) (struct drm_gem_object *obj);
+
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
@@ -892,6 +964,21 @@ struct drm_device {
spinlock_t drw_lock;
struct idr drw_idr;
/*@} */
+
+ /** \name GEM information */
+ /*@{ */
+ spinlock_t object_name_lock;
+ struct idr object_name_idr;
+ atomic_t object_count;
+ atomic_t object_memory;
+ atomic_t pin_count;
+ atomic_t pin_memory;
+ atomic_t gtt_count;
+ atomic_t gtt_memory;
+ uint32_t gtt_total;
+ uint32_t invalidate_domains; /* domains pending invalidation */
+ uint32_t flush_domains; /* domains pending flush */
+ /*@} */
};
#if __OS_HAS_AGP
@@ -1007,6 +1094,10 @@ extern void drm_free_pages(unsigned long address, int order, int area);
extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
extern void drm_free_memctl(size_t size);
@@ -1200,7 +1291,7 @@ extern void drm_agp_chipset_flush(struct drm_device *dev);
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
extern int drm_put_dev(struct drm_device *dev);
-extern int drm_put_minor(struct drm_minor **minor);
+extern int drm_put_minor(struct drm_device *dev);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern struct class *drm_class;
@@ -1260,6 +1351,70 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
return block->mm;
}
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int
+drm_gem_init (struct drm_device *dev);
+
+void
+drm_gem_object_free (struct kref *kref);
+
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size);
+
+void
+drm_gem_object_handle_free (struct kref *kref);
+
+static inline void drm_gem_object_reference(struct drm_gem_object *obj)
+{
+ kref_get(&obj->refcount);
+}
+
+static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
+
+ kref_put (&obj->refcount, drm_gem_object_free);
+}
+
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep);
+
+static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj)
+{
+ drm_gem_object_reference (obj);
+ kref_get(&obj->handlecount);
+}
+
+static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj)
+{
+ if (obj == NULL)
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+ kref_put (&obj->handlecount, drm_gem_object_handle_free);
+ drm_gem_object_unreference (obj);
+}
+
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ int handle);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 80663717..6ccb4b6d 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -484,7 +484,50 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset)
+{
+ DRM_AGP_MEM *mem;
+ int ret, i;
+ DRM_DEBUG("drm_agp_populate_ttm\n");
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
+ mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY);
+#else
+ mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+ AGP_USER_MEMORY);
+#endif
+ if (mem == NULL) {
+ DRM_ERROR("Failed to allocate memory for %ld pages\n",
+ num_pages);
+ return NULL;
+ }
+
+ for (i = 0; i < num_pages; i++)
+ mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
+ mem->page_count = num_pages;
+
+ mem->is_flushed = TRUE;
+ ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+ if (ret != 0) {
+ DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+ agp_free_memory(mem);
+ return NULL;
+ }
+
+ return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
/*
* AGP ttm backend interface.
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index efeee83b..7ddc2c97 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -332,7 +332,7 @@ typedef _Bool bool;
#endif
-#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM))
+#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
#define DRM_KMAP_ATOMIC_PROT_PFN
extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
pgprot_t protection);
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 6ac2adf3..efd6416f 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -150,6 +150,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -418,7 +422,7 @@ static void drm_cleanup(struct drm_device * dev)
drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
- drm_put_minor(&dev->primary);
+ drm_put_minor(dev);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
}
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 3bc25f24..ec521101 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -274,6 +274,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_free;
}
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_open(dev, priv);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
@@ -444,6 +447,9 @@ int drm_release(struct inode *inode, struct file *filp)
dev->driver->reclaim_buffers(dev, file_priv);
}
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
+
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
new file mode 100644
index 00000000..434155b3
--- /dev/null
+++ b/linux-core/drm_gem.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file. However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ * default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ * handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls. The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+ spin_lock_init(&dev->object_name_lock);
+ idr_init(&dev->object_name_idr);
+ atomic_set(&dev->object_count, 0);
+ atomic_set(&dev->object_memory, 0);
+ atomic_set(&dev->pin_count, 0);
+ atomic_set(&dev->pin_memory, 0);
+ atomic_set(&dev->gtt_count, 0);
+ atomic_set(&dev->gtt_memory, 0);
+ return 0;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_object *obj;
+
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+
+ obj->dev = dev;
+ obj->filp = shmem_file_setup("drm mm object", size, 0);
+ if (IS_ERR(obj->filp)) {
+ kfree(obj);
+ return NULL;
+ }
+
+ kref_init(&obj->refcount);
+ kref_init(&obj->handlecount);
+ obj->size = size;
+ if (dev->driver->gem_init_object != NULL &&
+ dev->driver->gem_init_object(obj) != 0) {
+ fput(obj->filp);
+ kfree(obj);
+ return NULL;
+ }
+ atomic_inc(&dev->object_count);
+ atomic_add(obj->size, &dev->object_memory);
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+
+ /* This is gross. The idr system doesn't let us try a delete and
+ * return an error code. It just spews if you fail at deleting.
+ * So, we have to grab a lock around finding the object and then
+ * doing the delete on it and dropping the refcount, or the user
+ * could race us to double-decrement the refcount and cause a
+ * use-after-free later. Given the frequency of our handle lookups,
+ * we may want to use ida for number allocation and a hash table
+ * for the pointers, anyway.
+ */
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return -EINVAL;
+ }
+ dev = obj->dev;
+
+ /* Release reference and decrement refcount. */
+ idr_remove(&filp->object_idr, handle);
+ spin_unlock(&filp->table_lock);
+
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handlep)
+{
+ int ret;
+
+ /*
+ * Get the user-visible handle using idr.
+ */
+again:
+ /* ensure there is space available to allocate a handle */
+ if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ /* do the allocation under our spinlock */
+ spin_lock(&file_priv->table_lock);
+ ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
+ spin_unlock(&file_priv->table_lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ if (ret != 0)
+ return ret;
+
+ drm_gem_object_handle_reference(obj);
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ int handle)
+{
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return NULL;
+ }
+
+ drm_gem_object_reference(obj);
+
+ spin_unlock(&filp->table_lock);
+
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_close *args = data;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ ret = drm_gem_handle_delete(file_priv, args->handle);
+
+ return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_flink *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+
+again:
+ if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ spin_lock(&dev->object_name_lock);
+ if (obj->name) {
+ spin_unlock(&dev->object_name_lock);
+ return -EEXIST;
+ }
+ ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+ &obj->name);
+ spin_unlock(&dev->object_name_lock);
+ if (ret == -EAGAIN)
+ goto again;
+
+ if (ret != 0) {
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /*
+ * Leave the reference from the lookup around as the
+ * name table now holds one
+ */
+ args->name = (uint64_t) obj->name;
+
+ return 0;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_open *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+ int handle;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ spin_lock(&dev->object_name_lock);
+ obj = idr_find(&dev->object_name_idr, (int) args->name);
+ if (obj)
+ drm_gem_object_reference(obj);
+ spin_unlock(&dev->object_name_lock);
+ if (!obj)
+ return -ENOENT;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ args->size = obj->size;
+
+ return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+ idr_init(&file_private->object_idr);
+ spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+
+ drm_gem_object_handle_unreference(obj);
+
+ return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+ mutex_lock(&dev->struct_mutex);
+ idr_for_each(&file_private->object_idr,
+ &drm_gem_object_release_handle, NULL);
+
+ idr_destroy(&file_private->object_idr);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+ struct drm_device *dev = obj->dev;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (dev->driver->gem_free_object != NULL)
+ dev->driver->gem_free_object(obj);
+
+ fput(obj->filp);
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
+ kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+ struct drm_gem_object *obj = container_of(kref,
+ struct drm_gem_object,
+ handlecount);
+ struct drm_device *dev = obj->dev;
+
+ /* Remove any name for this object */
+ spin_lock(&dev->object_name_lock);
+ if (obj->name) {
+ idr_remove(&dev->object_name_idr, obj->name);
+ spin_unlock(&dev->object_name_lock);
+ /*
+ * The object name held a reference to this object, drop
+ * that now.
+ */
+ drm_gem_object_unreference(obj);
+ } else
+ spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index d0d6f987..5b9f474b 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
- p->irq = dev->irq;
+ p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
@@ -128,6 +128,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
+ init_timer_deferrable(&dev->vblank_disable_timer);
spin_lock_init(&dev->vbl_lock);
atomic_set(&dev->vbl_signal_pending, 0);
dev->num_crtcs = num_crtcs;
@@ -201,7 +202,7 @@ int drm_irq_install(struct drm_device * dev)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if (dev->irq == 0)
+ if (dev->pdev->irq == 0)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@@ -219,7 +220,7 @@ int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("irq=%d\n", dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@@ -228,7 +229,7 @@ int drm_irq_install(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
- ret = request_irq(dev->irq, dev->driver->irq_handler,
+ ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
sh_flags, dev->devname, dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
@@ -236,6 +237,10 @@ int drm_irq_install(struct drm_device * dev)
mutex_unlock(&dev->struct_mutex);
return ret;
}
+ /* Expose the device irq to device drivers that want to export it for
+ * whatever reason.
+ */
+ dev->irq = dev->pdev->irq;
/* After installing handler */
ret = dev->driver->irq_postinstall(dev);
@@ -271,11 +276,11 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("irq=%d\n", dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->pdev->irq);
dev->driver->irq_uninstall(dev);
- free_irq(dev->irq, dev);
+ free_irq(dev->pdev->irq, dev);
drm_vblank_cleanup(dev);
@@ -309,7 +314,7 @@ int drm_control(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != dev->irq)
+ ctl->irq != dev->pdev->irq)
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
@@ -514,7 +519,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret = 0;
unsigned int flags, seq, crtc;
- if ((!dev->irq) || (!dev->irq_enabled))
+ if ((!dev->pdev->irq) || (!dev->irq_enabled))
return -EINVAL;
if (vblwait->request.type &
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index 573213de..a2966efb 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -218,22 +218,16 @@ int drm_lock_take(struct drm_lock_data *lock_data,
} while (prev != old);
spin_unlock_bh(&lock_data->spinlock);
- if (_DRM_LOCKING_CONTEXT(old) == context) {
- if (old & _DRM_LOCK_HELD) {
- if (context != DRM_KERNEL_CONTEXT) {
- DRM_ERROR("%d holds heavyweight lock\n",
- context);
- }
- return 0;
+ /* Warn on recursive locking of user contexts. */
+ if (_DRM_LOCKING_CONTEXT(old) == context && _DRM_LOCK_IS_HELD(old)) {
+ if (context != DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("%d holds heavyweight lock\n",
+ context);
}
+ return 0;
}
- if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
- /* Have lock */
-
- return 1;
- }
- return 0;
+ return !_DRM_LOCK_IS_HELD(old);
}
/**
@@ -386,7 +380,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_release);
-
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index e1df3dac..b90fc020 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -310,6 +310,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
+EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -322,6 +323,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
+EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP*/
static void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index 59110293..1de7059a 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -167,6 +167,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
return child;
}
+EXPORT_SYMBOL(drm_mm_get_block);
/*
* Put a block. Merge with the previous and / or next block if they are free.
@@ -257,6 +258,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
return best;
}
+EXPORT_SYMBOL(drm_mm_search_free);
int drm_mm_clean(struct drm_mm * mm)
{
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 770fbc56..6a900612 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -301,7 +301,12 @@ struct drm_ttm_backend_func {
void (*destroy) (struct drm_ttm_backend *backend);
};
-
+/**
+ * This structure associates a set of flags and methods with a drm_ttm
+ * object, and will also be subclassed by the particular backend.
+ *
+ * \sa #drm_agp_ttm_backend
+ */
struct drm_ttm_backend {
struct drm_device *dev;
uint32_t flags;
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 42da5c69..d3845bb3 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -51,6 +51,10 @@ static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_objects_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
@@ -70,6 +74,8 @@ static struct drm_proc_list {
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
{"objects", drm_objects_info},
+ {"gem_names", drm_gem_name_info},
+ {"gem_objects", drm_gem_object_info},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info},
#endif
@@ -582,6 +588,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
return ret;
}
+struct drm_gem_name_info_data {
+ int len;
+ char *buf;
+ int eof;
+};
+
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+ struct drm_gem_name_info_data *nid = data;
+
+ DRM_INFO("name %d size %d\n", obj->name, obj->size);
+ if (nid->eof)
+ return 0;
+
+ nid->len += sprintf(&nid->buf[nid->len],
+ "%6d%9d%8d%9d\n",
+ obj->name, obj->size,
+ atomic_read(&obj->handlecount.refcount),
+ atomic_read(&obj->refcount.refcount));
+ if (nid->len > DRM_PROC_LIMIT) {
+ nid->eof = 1;
+ return 0;
+ }
+ return 0;
+}
+
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ struct drm_gem_name_info_data nid;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ nid.len = sprintf(buf, " name size handles refcount\n");
+ nid.buf = buf;
+ nid.eof = 0;
+ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
+
+ *start = &buf[offset];
+ *eof = 0;
+ if (nid.len > request + offset)
+ return request;
+ *eof = 1;
+ return nid.len - offset;
+}
+
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
+ DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
+ DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
+ DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
+ DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+ DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
#if DRM_DEBUG_CODE
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index c68adbaf..1518bc6d 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -163,7 +163,16 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
+ if (driver->driver_features & DRIVER_GEM) {
+ retcode = drm_gem_init (dev);
+ if (retcode) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto error_out_unreg;
+ }
+ }
+
drm_fence_manager_init(dev);
+
return 0;
error_out_unreg:
@@ -213,6 +222,13 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
goto err_mem;
}
+ if (dev->driver->proc_init) {
+ ret = dev->driver->proc_init(new_minor);
+ if (ret) {
+ DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n");
+ goto err_mem;
+ }
+ }
} else
new_minor->dev_root = NULL;
@@ -229,8 +245,11 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
err_g2:
- if (new_minor->type == DRM_MINOR_LEGACY)
+ if (new_minor->type == DRM_MINOR_LEGACY) {
+ if (dev->driver->proc_cleanup)
+ dev->driver->proc_cleanup(new_minor);
drm_proc_cleanup(new_minor, drm_proc_root);
+ }
err_mem:
kfree(new_minor);
err_idr:
@@ -293,7 +312,7 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
return 0;
err_g4:
- drm_put_minor(&dev->primary);
+ drm_put_minor(dev);
err_g3:
if (!drm_fb_loaded)
pci_disable_device(pdev);
@@ -349,13 +368,17 @@ int drm_put_dev(struct drm_device * dev)
* last minor released.
*
*/
-int drm_put_minor(struct drm_minor **minor_p)
+int drm_put_minor(struct drm_device *dev)
{
+ struct drm_minor **minor_p = &dev->primary;
struct drm_minor *minor = *minor_p;
DRM_DEBUG("release secondary minor %d\n", minor->index);
- if (minor->type == DRM_MINOR_LEGACY)
+ if (minor->type == DRM_MINOR_LEGACY) {
+ if (dev->driver->proc_cleanup)
+ dev->driver->proc_cleanup(minor);
drm_proc_cleanup(minor, drm_proc_root);
+ }
drm_sysfs_device_remove(minor);
idr_remove(&drm_minors_idr, minor->index);
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index cf8016c0..28923201 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -111,18 +111,22 @@ static int i915_resume(struct drm_device *dev)
}
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void remove(struct pci_dev *pdev);
+
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
* deal with them for intel hardware.
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
.load = i915_driver_load,
.unload = i915_driver_unload,
.firstopen = i915_driver_firstopen,
+ .open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
+ .postclose = i915_driver_postclose,
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
@@ -136,7 +140,11 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
+ .proc_init = i915_gem_proc_init,
+ .proc_cleanup = i915_gem_proc_cleanup,
.ioctls = i915_ioctls,
+ .gem_init_object = i915_gem_init_object,
+ .gem_free_object = i915_gem_free_object,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -153,7 +161,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = probe,
- .remove = __devexit_p(drm_cleanup_pci),
+ .remove = remove,
},
#ifdef I915_HAVE_FENCE
.fence_driver = &i915_fence_driver,
@@ -171,7 +179,28 @@ static struct drm_driver driver = {
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ int ret;
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+ * integrated graphics even though the support isn't actually there
+ * according to the published specs. It doesn't appear to function
+ * correctly in testing on 945G.
+ * This may be a side effect of MSI having been made available for PEG
+ * and the registers being closely associated.
+ */
+ if (pdev->device != 0x2772 && pdev->device != 0x27A2)
+ (void)pci_enable_msi(pdev);
+
+ ret = drm_get_dev(pdev, ent, &driver);
+ if (ret && pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ return ret;
+}
+static void remove(struct pci_dev *pdev)
+{
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+ drm_cleanup_pci(pdev);
}
static int __init i915_init(void)
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
new file mode 100644
index 00000000..35dc5bd7
--- /dev/null
+++ b/linux-core/i915_gem.c
@@ -0,0 +1,2502 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_compat.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include <linux/swap.h>
+
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
+static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_init *args = data;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
+ (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
+ args->gtt_end - args->gtt_start);
+
+ dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_create *args = data;
+ struct drm_gem_object *obj;
+ int handle, ret;
+
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ /* Allocate the new object */
+ obj = drm_gem_object_alloc(dev, args->size);
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file_priv, obj, &handle);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+}
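Editorial note: from userspace the create ioctl is the allocation entry point: fill in a size, get back a handle. A minimal sketch, not part of the patch; the size/handle fields match the args usage above, while the DRM_IOCTL_I915_GEM_CREATE macro is assumed to come from the i915_drm.h added in this series:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int gem_create(int fd, uint64_t size, uint32_t *handle_out)
{
	struct drm_i915_gem_create create;

	memset(&create, 0, sizeof(create));
	create.size = size;	/* the kernel rounds this up to PAGE_SIZE */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
		return -1;

	*handle_out = create.handle;	/* filled in by the kernel */
	return 0;
}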
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pread *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ ssize_t read;
+ loff_t offset;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+ obj_priv = obj->driver_private;
+
+ /* Bounds check source.
+ *
+ * XXX: This could use review for overflow issues...
+ */
+ if (args->offset > obj->size || args->size > obj->size ||
+ args->offset + args->size > obj->size) {
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
+ I915_GEM_DOMAIN_CPU, 0);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ offset = args->offset;
+
+ read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+ args->size, &offset);
+ if (read != args->size) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (read < 0)
+ return read;
+ else
+ return -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+#include "drm_compat.h"
+
+static int
+i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ ssize_t remain;
+ loff_t offset;
+ char __user *user_data;
+ char *vaddr;
+ int i, o, l;
+ int ret = 0;
+ unsigned long pfn;
+ unsigned long unwritten;
+
+ user_data = (char __user *) (uintptr_t) args->data_ptr;
+ remain = args->size;
+ if (!access_ok(VERIFY_READ, user_data, remain))
+ return -EFAULT;
+
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_object_pin(obj, 0);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+ if (ret)
+ goto fail;
+
+ obj_priv = obj->driver_private;
+ offset = obj_priv->gtt_offset + args->offset;
+ obj_priv->dirty = 1;
+
+ while (remain > 0) {
+ /* Operation in this page
+ *
+ * i = page number
+ * o = offset within page
+ * l = bytes to copy
+ */
+ i = offset >> PAGE_SHIFT;
+ o = offset & (PAGE_SIZE-1);
+ l = remain;
+ if ((o + l) > PAGE_SIZE)
+ l = PAGE_SIZE - o;
+
+ pfn = (dev->agp->base >> PAGE_SHIFT) + i;
+
+#ifdef DRM_KMAP_ATOMIC_PROT_PFN
+ /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
+ */
+ vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
+ __pgprot(__PAGE_KERNEL));
+#if WATCH_PWRITE
+ DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
+ i, o, l, pfn, vaddr);
+#endif
+ unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
+ user_data, l);
+ kunmap_atomic(vaddr, KM_USER0);
+
+ if (unwritten)
+#endif
+ {
+ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+#if WATCH_PWRITE
+ DRM_INFO("pwrite slow i %d o %d l %d "
+ "pfn %ld vaddr %p\n",
+ i, o, l, pfn, vaddr);
+#endif
+ if (vaddr == NULL) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ unwritten = __copy_from_user(vaddr + o, user_data, l);
+#if WATCH_PWRITE
+ DRM_INFO("unwritten %ld\n", unwritten);
+#endif
+ iounmap(vaddr);
+ if (unwritten) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ }
+
+ remain -= l;
+ user_data += l;
+ offset += l;
+ }
+#if WATCH_PWRITE && 1
+ i915_gem_clflush_object(obj);
+ i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
+ i915_gem_clflush_object(obj);
+#endif
+
+fail:
+ i915_gem_object_unpin(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
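Editorial note: the copy loop above walks the write one page at a time, splitting the current GTT offset into a page number, an offset inside that page, and a byte count clamped to the page boundary. A standalone illustration of just that arithmetic, not part of the patch, using a hypothetical 4 KB page size:

#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

int main(void)
{
	unsigned long offset = 0x3ff0;	/* 16 bytes before a page boundary */
	unsigned long remain = 40;	/* bytes left to write */

	while (remain > 0) {
		unsigned long i = offset >> EX_PAGE_SHIFT;	/* page number */
		unsigned long o = offset & (EX_PAGE_SIZE - 1);	/* offset in page */
		unsigned long l = remain;			/* bytes this pass */

		if (o + l > EX_PAGE_SIZE)
			l = EX_PAGE_SIZE - o;	/* clamp to the page boundary */

		/* Prints "page 3 offset 4080 copy 16", then "page 4 offset 0 copy 24". */
		printf("page %lu offset %lu copy %lu\n", i, o, l);

		remain -= l;
		offset += l;
	}
	return 0;
}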
+
+int
+i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ int ret;
+ loff_t offset;
+ ssize_t written;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ offset = args->offset;
+
+ written = vfs_write(obj->filp,
+ (char __user *)(uintptr_t) args->data_ptr,
+ args->size, &offset);
+ if (written != args->size) {
+ mutex_unlock(&dev->struct_mutex);
+ if (written < 0)
+ return written;
+ else
+ return -EINVAL;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+ obj_priv = obj->driver_private;
+
+ /* Bounds check destination.
+ *
+ * XXX: This could use review for overflow issues...
+ */
+ if (args->offset > obj->size || args->size > obj->size ||
+ args->offset + args->size > obj->size) {
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+
+ /* We can only do the GTT pwrite on untiled buffers, as otherwise
+ * it would end up going through the fenced access, and we'll get
+ * different detiling behavior between reading and writing.
+ * pread/pwrite currently are reading and writing from the CPU
+ * perspective, requiring manual detiling by the client.
+ */
+ if (obj_priv->tiling_mode == I915_TILING_NONE &&
+ dev->gtt_total != 0)
+ ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
+ else
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+
+#if WATCH_PWRITE
+ if (ret)
+ DRM_INFO("pwrite failed %d\n", ret);
+#endif
+
+ drm_gem_object_unreference(obj);
+
+ return ret;
+}
+
+/**
+ * Called when user space prepares to use an object
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_set_domain *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+
+ mutex_lock(&dev->struct_mutex);
+#if WATCH_BUF
+ DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+ obj, obj->size, args->read_domains, args->write_domain);
+#endif
+ ret = i915_gem_set_domain(obj, file_priv,
+ args->read_domains, args->write_domain);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Called when user space has done writes to this buffer
+ */
+int
+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_sw_finish *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EBADF;
+ }
+
+#if WATCH_BUF
+ DRM_INFO("%s: sw_finish %d (%p %d)\n",
+ __func__, args->handle, obj, obj->size);
+#endif
+ obj_priv = obj->driver_private;
+
+ /* Pinned buffers may be scanout, so flush the cache */
+ if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ }
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_mmap *args = data;
+ struct drm_gem_object *obj;
+ loff_t offset;
+ unsigned long addr;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+
+ offset = args->offset;
+
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap(obj->filp, 0, args->size,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ args->offset);
+ up_write(&current->mm->mmap_sem);
+ mutex_lock(&dev->struct_mutex);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR((void *)addr))
+ return addr;
+
+ args->addr_ptr = (uint64_t) addr;
+
+ return 0;
+}
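Editorial note: the mmap ioctl gives the CPU a direct view of the object's shmem backing store. A minimal userspace sketch, not part of the patch; the handle/offset/size/addr_ptr fields match the args usage above, and the DRM_IOCTL_I915_GEM_MMAP macro is assumed to come from i915_drm.h:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static void *gem_mmap(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap mmap_arg;

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = handle;
	mmap_arg.offset = 0;	/* start of the object */
	mmap_arg.size = size;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) != 0)
		return NULL;

	/* addr_ptr is filled in by the kernel with the CPU mapping. */
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}

Writes made through such a mapping are in the CPU domain; the set_domain and sw_finish ioctls above are what keep them coherent with the GPU.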
+
+static void
+i915_gem_object_free_page_list(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count = obj->size / PAGE_SIZE;
+ int i;
+
+ if (obj_priv->page_list == NULL)
+ return;
+
+
+ for (i = 0; i < page_count; i++)
+ if (obj_priv->page_list[i] != NULL) {
+ if (obj_priv->dirty)
+ set_page_dirty(obj_priv->page_list[i]);
+ mark_page_accessed(obj_priv->page_list[i]);
+ page_cache_release(obj_priv->page_list[i]);
+ }
+ obj_priv->dirty = 0;
+
+ drm_free(obj_priv->page_list,
+ page_count * sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ obj_priv->page_list = NULL;
+}
+
+static void
+i915_gem_object_move_to_active(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* Add a reference if we're newly entering the active list. */
+ if (!obj_priv->active) {
+ drm_gem_object_reference(obj);
+ obj_priv->active = 1;
+ }
+ /* Move from whatever list we were on to the tail of execution. */
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.active_list);
+}
+
+
+static void
+i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+ if (obj_priv->pin_count != 0)
+ list_del_init(&obj_priv->list);
+ else
+ list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+ if (obj_priv->active) {
+ obj_priv->active = 0;
+ drm_gem_object_unreference(obj);
+ }
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static uint32_t
+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *request;
+ uint32_t seqno;
+ int was_empty;
+ RING_LOCALS;
+
+ request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+ if (request == NULL)
+ return 0;
+
+ /* Grab the seqno we're going to make this request be, and bump the
+ * next (skipping 0 so it can be the reserved no-seqno value).
+ */
+ seqno = dev_priv->mm.next_gem_seqno;
+ dev_priv->mm.next_gem_seqno++;
+ if (dev_priv->mm.next_gem_seqno == 0)
+ dev_priv->mm.next_gem_seqno++;
+
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(seqno);
+
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+
+ DRM_DEBUG("%d\n", seqno);
+
+ request->seqno = seqno;
+ request->emitted_jiffies = jiffies;
+ request->flush_domains = flush_domains;
+ was_empty = list_empty(&dev_priv->mm.request_list);
+ list_add_tail(&request->list, &dev_priv->mm.request_list);
+
+ if (was_empty)
+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+ return seqno;
+}
+
+/**
+ * Command execution barrier
+ *
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+uint32_t
+i915_retire_commands(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ uint32_t flush_domains = 0;
+ RING_LOCALS;
+
+ /* The sampler always gets flushed on i965 (sigh) */
+ if (IS_I965G(dev))
+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ return flush_domains;
+}
+
+/**
+ * Moves buffers associated only with the given active seqno from the active
+ * to inactive list, potentially freeing them.
+ */
+static void
+i915_gem_retire_request(struct drm_device *dev,
+ struct drm_i915_gem_request *request)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (request->flush_domains != 0) {
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ /* First clear any buffers that were only waiting for a flush
+ * matching the one just retired.
+ */
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.flushing_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if (obj->write_domain & request->flush_domains) {
+ obj->write_domain = 0;
+ i915_gem_object_move_to_inactive(obj);
+ }
+ }
+
+ }
+
+ /* Move any buffers on the active list that are no longer referenced
+ * by the ringbuffer to the flushing/inactive lists as appropriate.
+ */
+ while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ /* If the seqno being retired doesn't match the oldest in the
+ * list, then the oldest in the list must still be newer than
+ * this seqno.
+ */
+ if (obj_priv->last_rendering_seqno != request->seqno)
+ return;
+#if WATCH_LRU
+ DRM_INFO("%s: retire %d moves to inactive list %p\n",
+ __func__, request->seqno, obj);
+#endif
+
+ /* If this request flushes the write domain,
+ * clear the write domain from the object now
+ */
+ if (request->flush_domains & obj->write_domain)
+ obj->write_domain = 0;
+
+ if (obj->write_domain != 0) {
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.flushing_list);
+ } else {
+ i915_gem_object_move_to_inactive(obj);
+ }
+ }
+}
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static int
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t)(seq1 - seq2) >= 0;
+}
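Editorial note: the signed subtraction above is what keeps this comparison valid when the 32-bit sequence counter wraps (the allocator skips 0, so every request has a nonzero seqno). A standalone check of the wrap behaviour, not part of the patch:

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* Ordinary case: 105 was emitted after 100. */
	assert(seqno_passed(105, 100));
	assert(!seqno_passed(100, 105));

	/* Wrap case: 5 was emitted after 0xfffffffb, so it still counts
	 * as "later" even though it is numerically smaller. */
	assert(seqno_passed(5, 0xfffffffbu));
	assert(!seqno_passed(0xfffffffbu, 5));

	return 0;
}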
+
+uint32_t
+i915_get_gem_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno;
+
+ seqno = i915_get_gem_seqno(dev);
+
+ while (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+ uint32_t retiring_seqno;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+ retiring_seqno = request->seqno;
+
+ if (i915_seqno_passed(seqno, retiring_seqno) ||
+ dev_priv->mm.wedged) {
+ i915_gem_retire_request(dev, request);
+
+ list_del(&request->list);
+ drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+ } else
+ break;
+ }
+}
+
+void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_device *dev;
+
+ dev_priv = container_of(work, drm_i915_private_t,
+ mm.retire_work.work);
+ dev = dev_priv->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_retire_requests(dev);
+ if (!list_empty(&dev_priv->mm.request_list))
+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ BUG_ON(seqno == 0);
+
+ if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ i915_user_irq_on(dev_priv);
+ ret = wait_event_interruptible(dev_priv->irq_queue,
+ i915_seqno_passed(i915_get_gem_seqno(dev),
+ seqno) ||
+ dev_priv->mm.wedged);
+ i915_user_irq_off(dev_priv);
+ dev_priv->mm.waiting_gem_seqno = 0;
+ }
+ if (dev_priv->mm.wedged)
+ ret = -EIO;
+
+ if (ret && ret != -ERESTARTSYS)
+ DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+ __func__, ret, seqno, i915_get_gem_seqno(dev));
+
+ /* Directly dispatch request retiring. While we have the work queue
+ * to handle this, the waiter on a request often wants an associated
+ * buffer to have made it to the inactive list, and we would need
+ * a separate wait queue to handle that.
+ */
+ if (ret == 0)
+ i915_gem_retire_requests(dev);
+
+ return ret;
+}
+
+static void
+i915_gem_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t cmd;
+ RING_LOCALS;
+
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+ invalidate_domains, flush_domains);
+#endif
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ drm_agp_chipset_flush(dev);
+
+ if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) {
+ /*
+ * read/write caches:
+ *
+ * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+ * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
+ * also flushed at 2d versus 3d pipeline switches.
+ *
+ * read-only caches:
+ *
+ * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+ * MI_READ_FLUSH is set, and is always flushed on 965.
+ *
+ * I915_GEM_DOMAIN_COMMAND may not exist?
+ *
+ * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+ * invalidated when MI_EXE_FLUSH is set.
+ *
+ * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+ * invalidated with every MI_FLUSH.
+ *
+ * TLBs:
+ *
+ * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+ * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
+ * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+ * are flushed at any MI_FLUSH.
+ */
+
+ cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+ if ((invalidate_domains|flush_domains) &
+ I915_GEM_DOMAIN_RENDER)
+ cmd &= ~MI_NO_WRITE_FLUSH;
+ if (!IS_I965G(dev)) {
+ /*
+ * On the 965, the sampler cache always gets flushed
+ * and this bit is reserved.
+ */
+ if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+ cmd |= MI_READ_FLUSH;
+ }
+ if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+ cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+ DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+ OUT_RING(0); /* noop */
+ ADVANCE_LP_RING();
+ }
+}
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static int
+i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+ uint32_t write_domain;
+
+ /* If there are writes queued to the buffer, flush and
+ * create a new seqno to wait for.
+ */
+ write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT);
+ if (write_domain) {
+#if WATCH_BUF
+ DRM_INFO("%s: flushing object %p from write domain %08x\n",
+ __func__, obj, write_domain);
+#endif
+ i915_gem_flush(dev, 0, write_domain);
+
+ i915_gem_object_move_to_active(obj);
+ obj_priv->last_rendering_seqno = i915_add_request(dev,
+ write_domain);
+ BUG_ON(obj_priv->last_rendering_seqno == 0);
+#if WATCH_LRU
+ DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
+#endif
+ }
+
+ /* If there is rendering queued on the buffer being evicted, wait for
+ * it.
+ */
+ if (obj_priv->active) {
+#if WATCH_BUF
+ DRM_INFO("%s: object %p wait for seqno %08x\n",
+ __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+ ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+static int
+i915_gem_object_unbind(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret = 0;
+
+#if WATCH_BUF
+ DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
+ DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
+#endif
+ if (obj_priv->gtt_space == NULL)
+ return 0;
+
+ if (obj_priv->pin_count != 0) {
+ DRM_ERROR("Attempting to unbind pinned buffer\n");
+ return -EINVAL;
+ }
+
+ /* Wait for any rendering to complete
+ */
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret) {
+ DRM_ERROR("wait_rendering failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Move the object to the CPU domain to ensure that
+ * any possible CPU writes while it's not in the GTT
+ * are flushed when we go to remap it. This will
+ * also ensure that all pending GPU writes are finished
+ * before we unbind.
+ */
+ ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
+ I915_GEM_DOMAIN_CPU);
+ if (ret) {
+ DRM_ERROR("set_domain failed: %d\n", ret);
+ return ret;
+ }
+
+ if (obj_priv->agp_mem != NULL) {
+ drm_unbind_agp(obj_priv->agp_mem);
+ drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+ obj_priv->agp_mem = NULL;
+ }
+
+ BUG_ON(obj_priv->active);
+
+ i915_gem_object_free_page_list(obj);
+
+ if (obj_priv->gtt_space) {
+ atomic_dec(&dev->gtt_count);
+ atomic_sub(obj->size, &dev->gtt_memory);
+
+ drm_mm_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ }
+
+ /* Remove ourselves from the LRU list if present. */
+ if (!list_empty(&obj_priv->list))
+ list_del_init(&obj_priv->list);
+
+ return 0;
+}
+
+static int
+i915_gem_evict_something(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ for (;;) {
+ /* If there's an inactive buffer available now, grab it
+ * and be done.
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+ BUG_ON(obj_priv->pin_count != 0);
+#if WATCH_LRU
+ DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+ BUG_ON(obj_priv->active);
+
+ /* Wait on the rendering and unbind the buffer. */
+ ret = i915_gem_object_unbind(obj);
+ break;
+ }
+
+ /* If we didn't get anything, but the ring is still processing
+ * things, wait for one of those things to finish and hopefully
+ * leave us a buffer to evict.
+ */
+ if (!list_empty(&dev_priv->mm.request_list)) {
+ struct drm_i915_gem_request *request;
+
+ request = list_first_entry(&dev_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ list);
+
+ ret = i915_wait_request(dev, request->seqno);
+ if (ret)
+ break;
+
+ /* if waiting caused an object to become inactive,
+ * then loop around and wait for it. Otherwise, we
+ * assume that waiting freed and unbound something,
+ * so there should now be some space in the GTT
+ */
+ if (!list_empty(&dev_priv->mm.inactive_list))
+ continue;
+ break;
+ }
+
+ /* If we didn't have anything on the request list but there
+ * are buffers awaiting a flush, emit one and try again.
+ * When we wait on it, those buffers waiting for that flush
+ * will get moved to inactive.
+ */
+ if (!list_empty(&dev_priv->mm.flushing_list)) {
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ i915_gem_flush(dev,
+ obj->write_domain,
+ obj->write_domain);
+ i915_add_request(dev, obj->write_domain);
+
+ obj = NULL;
+ continue;
+ }
+
+ DRM_ERROR("inactive empty %d request empty %d "
+ "flushing empty %d\n",
+ list_empty(&dev_priv->mm.inactive_list),
+ list_empty(&dev_priv->mm.request_list),
+ list_empty(&dev_priv->mm.flushing_list));
+ /* If we didn't do any of the above, there's nothing to be done
+ * and we just can't fit it in.
+ */
+ return -ENOMEM;
+ }
+ return ret;
+}
+
+static int
+i915_gem_object_get_page_list(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count, i;
+ struct address_space *mapping;
+ struct inode *inode;
+ struct page *page;
+ int ret;
+
+ if (obj_priv->page_list)
+ return 0;
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ */
+ page_count = obj->size / PAGE_SIZE;
+ BUG_ON(obj_priv->page_list != NULL);
+ obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+ DRM_MEM_DRIVER);
+ if (obj_priv->page_list == NULL) {
+ DRM_ERROR("Faled to allocate page list\n");
+ return -ENOMEM;
+ }
+
+ inode = obj->filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+ for (i = 0; i < page_count; i++) {
+ page = read_mapping_page(mapping, i, NULL);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ DRM_ERROR("read_mapping_page failed: %d\n", ret);
+ i915_gem_object_free_page_list(obj);
+ return ret;
+ }
+ obj_priv->page_list[i] = page;
+ }
+ return 0;
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ struct drm_mm_node *free_space;
+ int page_count, ret;
+
+ if (alignment == 0)
+ alignment = PAGE_SIZE;
+ if (alignment & (PAGE_SIZE - 1)) {
+ DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+ return -EINVAL;
+ }
+
+ search_free:
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ obj->size, alignment, 0);
+ if (free_space != NULL) {
+ obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
+ alignment);
+ if (obj_priv->gtt_space != NULL) {
+ obj_priv->gtt_space->private = obj;
+ obj_priv->gtt_offset = obj_priv->gtt_space->start;
+ }
+ }
+ if (obj_priv->gtt_space == NULL) {
+ /* If the gtt is empty and we're still having trouble
+ * fitting our object in, we're out of memory.
+ */
+#if WATCH_LRU
+ DRM_INFO("%s: GTT full, evicting something\n", __func__);
+#endif
+ if (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list)) {
+ DRM_ERROR("GTT full, but LRU list empty\n");
+ return -ENOMEM;
+ }
+
+ ret = i915_gem_evict_something(dev);
+ if (ret != 0) {
+ DRM_ERROR("Failed to evict a buffer %d\n", ret);
+ return ret;
+ }
+ goto search_free;
+ }
+
+#if WATCH_BUF
+ DRM_INFO("Binding object of size %d at 0x%08x\n",
+ obj->size, obj_priv->gtt_offset);
+#endif
+ ret = i915_gem_object_get_page_list(obj);
+ if (ret) {
+ drm_mm_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ return ret;
+ }
+
+ page_count = obj->size / PAGE_SIZE;
+ /* Create an AGP memory structure pointing at our pages, and bind it
+ * into the GTT.
+ */
+ obj_priv->agp_mem = drm_agp_bind_pages(dev,
+ obj_priv->page_list,
+ page_count,
+ obj_priv->gtt_offset);
+ if (obj_priv->agp_mem == NULL) {
+ i915_gem_object_free_page_list(obj);
+ drm_mm_put_block(obj_priv->gtt_space);
+ obj_priv->gtt_space = NULL;
+ return -ENOMEM;
+ }
+ atomic_inc(&dev->gtt_count);
+ atomic_add(obj->size, &dev->gtt_memory);
+
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+ BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+
+ return 0;
+}
+
+void
+i915_gem_clflush_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ /* If we don't have a page list set up, then we're not pinned
+ * to GPU, and we can ignore the cache flush because it'll happen
+ * again at bind time.
+ */
+ if (obj_priv->page_list == NULL)
+ return;
+
+ drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
+}
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped to GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ * 6. Read/written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t invalidate_domains = 0;
+ uint32_t flush_domains = 0;
+ int ret;
+
+#if WATCH_BUF
+ DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+ __func__, obj,
+ obj->read_domains, read_domains,
+ obj->write_domain, write_domain);
+#endif
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (write_domain == 0)
+ read_domains |= obj->read_domains;
+ else
+ obj_priv->dirty = 1;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->write_domain && obj->write_domain != read_domains) {
+ flush_domains |= obj->write_domain;
+ invalidate_domains |= read_domains & ~obj->write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= read_domains & ~obj->read_domains;
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
+#if WATCH_BUF
+ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+ __func__, flush_domains, invalidate_domains);
+#endif
+ /*
+ * If we're invalidating the CPU cache and flushing a GPU cache,
+ * then pause for rendering so that the GPU caches will be
+ * flushed before the cpu cache is invalidated
+ */
+ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
+ (flush_domains & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT))) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
+ i915_gem_clflush_object(obj);
+ }
+
+ if ((write_domain | flush_domains) != 0)
+ obj->write_domain = write_domain;
+
+ /* If we're invalidating the CPU domain, clear the per-page CPU
+ * domain list as well.
+ */
+ if (obj_priv->page_cpu_valid != NULL &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
+ ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
+ memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+ }
+ obj->read_domains = read_domains;
+
+ dev->invalidate_domains |= invalidate_domains;
+ dev->flush_domains |= flush_domains;
+#if WATCH_BUF
+ DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
+ __func__,
+ obj->read_domains, obj->write_domain,
+ dev->invalidate_domains, dev->flush_domains);
+#endif
+ return 0;
+}
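Editorial note: the case studies in the comment above boil down to one rule: flush the old write domain when the readership changes, and invalidate every read domain that is newly added. A condensed, standalone restatement of that rule, not part of the patch, replaying two of the cases with hypothetical domain bits:

#include <assert.h>
#include <stdint.h>

/* Hypothetical domain bits, for the illustration only. */
#define EX_DOMAIN_CPU    0x1
#define EX_DOMAIN_RENDER 0x2

static void domain_rule(uint32_t old_read, uint32_t old_write,
			uint32_t new_read, uint32_t new_write,
			uint32_t *flush, uint32_t *invalidate)
{
	*flush = 0;
	*invalidate = 0;

	/* No new write domain: the object may stay readable everywhere
	 * it already was. */
	if (new_write == 0)
		new_read |= old_read;

	/* Flush the old write cache if the readership changes, and
	 * invalidate any new reader that is not the old writer. */
	if (old_write && old_write != new_read) {
		*flush |= old_write;
		*invalidate |= new_read & ~old_write;
	}

	/* Any genuinely new read domain holds stale data. */
	*invalidate |= new_read & ~old_read;
}

int main(void)
{
	uint32_t flush, invalidate;

	/* Case 3, step 3: a CPU-written constant buffer is read by the GPU. */
	domain_rule(EX_DOMAIN_CPU, EX_DOMAIN_CPU,
		    EX_DOMAIN_RENDER, 0, &flush, &invalidate);
	assert(flush == EX_DOMAIN_CPU && invalidate == EX_DOMAIN_RENDER);

	/* Case 2, step 4: a render target is mapped back for CPU access. */
	domain_rule(EX_DOMAIN_RENDER, EX_DOMAIN_RENDER,
		    EX_DOMAIN_CPU, EX_DOMAIN_CPU, &flush, &invalidate);
	assert(flush == EX_DOMAIN_RENDER && invalidate == EX_DOMAIN_CPU);

	return 0;
}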
+
+/**
+ * Set the read/write domain on a range of the object.
+ *
+ * Currently only implemented for CPU reads, otherwise drops to normal
+ * i915_gem_object_set_domain().
+ */
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret, i;
+
+ if (obj->read_domains & I915_GEM_DOMAIN_CPU)
+ return 0;
+
+ if (read_domains != I915_GEM_DOMAIN_CPU ||
+ write_domain != 0)
+ return i915_gem_object_set_domain(obj,
+ read_domains, write_domain);
+
+ /* Wait on any GPU rendering to the object to be flushed. */
+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
+
+ if (obj_priv->page_cpu_valid == NULL) {
+ obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
+ DRM_MEM_DRIVER);
+ if (obj_priv->page_cpu_valid == NULL)
+ return -ENOMEM;
+ }
+
+ /* Flush the cache on any pages that are still invalid from the CPU's
+ * perspective.
+ */
+ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+ if (obj_priv->page_cpu_valid[i])
+ continue;
+
+ drm_ttm_cache_flush(obj_priv->page_list + i, 1);
+
+ obj_priv->page_cpu_valid[i] = 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Once all of the objects have been set in the proper domain,
+ * perform the necessary flush and invalidate operations.
+ *
+ * Returns the write domains flushed, for use in flush tracking.
+ */
+static uint32_t
+i915_gem_dev_set_domain(struct drm_device *dev)
+{
+ uint32_t flush_domains = dev->flush_domains;
+
+ /*
+ * Now that all the buffers are synced to the proper domains,
+ * flush and invalidate the collected domains
+ */
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev,
+ dev->invalidate_domains,
+ dev->flush_domains);
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+ }
+
+ return flush_domains;
+}
+
+/**
+ * Pin an object to the GTT and evaluate the relocations landing in it.
+ */
+static int
+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object *entry)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_relocation_entry reloc;
+ struct drm_i915_gem_relocation_entry __user *relocs;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int i, ret;
+ uint32_t last_reloc_offset = -1;
+ void *reloc_page = NULL;
+
+ /* Choose the GTT offset for our buffer and put it there. */
+ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+ if (ret)
+ return ret;
+
+ entry->offset = obj_priv->gtt_offset;
+
+ relocs = (struct drm_i915_gem_relocation_entry __user *)
+ (uintptr_t) entry->relocs_ptr;
+ /* Apply the relocations, using the GTT aperture to avoid cache
+ * flushing requirements.
+ */
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_gem_object *target_obj;
+ struct drm_i915_gem_object *target_obj_priv;
+ uint32_t reloc_val, reloc_offset, *reloc_entry;
+ int ret;
+
+ ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+
+ target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+ reloc.target_handle);
+ if (target_obj == NULL) {
+ i915_gem_object_unpin(obj);
+ return -EBADF;
+ }
+ target_obj_priv = target_obj->driver_private;
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_obj_priv->gtt_space == NULL) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc.target_handle);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+ if (reloc.offset > obj->size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset, (int) obj->size);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+ if (reloc.offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+ if (reloc.write_domain && target_obj->pending_write_domain &&
+ reloc.write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc.target_handle,
+ (int) reloc.offset,
+ reloc.write_domain,
+ target_obj->pending_write_domain);
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -EINVAL;
+ }
+
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc.offset,
+ (int) reloc.target_handle,
+ (int) reloc.read_domains,
+ (int) reloc.write_domain,
+ (int) target_obj_priv->gtt_offset,
+ (int) reloc.presumed_offset,
+ reloc.delta);
+#endif
+
+ target_obj->pending_read_domains |= reloc.read_domains;
+ target_obj->pending_write_domain |= reloc.write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+ drm_gem_object_unreference(target_obj);
+ continue;
+ }
+
+ /* Now that we're going to actually write some data in,
+ * make sure that any rendering using this buffer's contents
+ * is completed.
+ */
+ i915_gem_object_wait_rendering(obj);
+
+ /* As we're writing through the gtt, flush
+ * any CPU writes before we write the relocations
+ */
+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+ }
+
+ /* Map the page containing the relocation we're going to
+ * perform.
+ */
+ reloc_offset = obj_priv->gtt_offset + reloc.offset;
+ if (reloc_page == NULL ||
+ (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
+ (reloc_offset & ~(PAGE_SIZE - 1))) {
+ if (reloc_page != NULL)
+ iounmap(reloc_page);
+
+ reloc_page = ioremap(dev->agp->base +
+ (reloc_offset & ~(PAGE_SIZE - 1)),
+ PAGE_SIZE);
+ last_reloc_offset = reloc_offset;
+ if (reloc_page == NULL) {
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return -ENOMEM;
+ }
+ }
+
+ reloc_entry = (uint32_t *)((char *)reloc_page +
+ (reloc_offset & (PAGE_SIZE - 1)));
+ reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+
+#if WATCH_BUF
+ DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
+ obj, (unsigned int) reloc.offset,
+ readl(reloc_entry), reloc_val);
+#endif
+ writel(reloc_val, reloc_entry);
+
+ /* Write the updated presumed offset for this entry back out
+ * to the user.
+ */
+ reloc.presumed_offset = target_obj_priv->gtt_offset;
+ ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
+ if (ret != 0) {
+ drm_gem_object_unreference(target_obj);
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+
+ drm_gem_object_unreference(target_obj);
+ }
+
+ if (reloc_page != NULL)
+ iounmap(reloc_page);
+
+#if WATCH_BUF
+ if (0)
+ i915_gem_dump_object(obj, 128, __func__, ~0);
+#endif
+ return 0;
+}
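Editorial note: each relocation ultimately patches a single 32-bit word. The word at reloc.offset inside the buffer being pinned receives the target object's final GTT offset plus reloc.delta, and that same value is written back to userspace as the new presumed_offset. The arithmetic on its own, with made-up offsets, not part of the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical placements chosen for the example. */
	uint32_t batch_gtt_offset  = 0x00100000;	/* where this buffer was pinned */
	uint32_t target_gtt_offset = 0x00240000;	/* where the target ended up */
	uint32_t reloc_offset = 0x80;	/* word to patch, relative to this buffer */
	uint32_t reloc_delta  = 0x10;	/* offset into the target object */

	/* Value written into the patched word. */
	uint32_t reloc_val = target_gtt_offset + reloc_delta;

	/* GTT address of the word being patched; the loop above maps the
	 * containing aperture page and writel()s reloc_val there. */
	uint32_t patch_addr = batch_gtt_offset + reloc_offset;

	assert(reloc_val == 0x00240010);
	assert(patch_addr == 0x00100080);

	/* If userspace already guessed target_gtt_offset correctly
	 * (presumed_offset matches), the write is skipped entirely. */
	return 0;
}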
+
+/** Dispatch a batchbuffer to the ring
+ */
+static int
+i915_dispatch_gem_execbuffer(struct drm_device *dev,
+ struct drm_i915_gem_execbuffer *exec,
+ uint64_t exec_offset)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+ (uintptr_t) exec->cliprects_ptr;
+ int nbox = exec->num_cliprects;
+ int i = 0, count;
+ uint32_t exec_start, exec_len;
+ RING_LOCALS;
+
+ exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ exec_len = (uint32_t) exec->batch_len;
+
+ if ((exec_start | exec_len) & 0x7) {
+ DRM_ERROR("alignment\n");
+ return -EINVAL;
+ }
+
+ if (!exec_start)
+ return -EINVAL;
+
+ count = nbox ? nbox : 1;
+
+ for (i = 0; i < count; i++) {
+ if (i < nbox) {
+ int ret = i915_emit_box(dev, boxes, i,
+ exec->DR1, exec->DR4);
+ if (ret)
+ return ret;
+ }
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ OUT_RING(exec_start + exec_len - 4);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
+ BEGIN_LP_RING(2);
+ if (IS_I965G(dev)) {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ OUT_RING(exec_start);
+ } else {
+ OUT_RING(MI_BATCH_BUFFER_START |
+ (2 << 6));
+ OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+ }
+ ADVANCE_LP_RING();
+ }
+ }
+
+ /* XXX breadcrumb */
+ return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ int ret = 0;
+ uint32_t seqno;
+
+ mutex_lock(&dev->struct_mutex);
+ seqno = i915_file_priv->mm.last_gem_throttle_seqno;
+ i915_file_priv->mm.last_gem_throttle_seqno =
+ i915_file_priv->mm.last_gem_seqno;
+ if (seqno)
+ ret = i915_wait_request(dev, seqno);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_gem_object **object_list = NULL;
+ struct drm_gem_object *batch_obj;
+ int ret, i, pinned = 0;
+ uint64_t exec_offset;
+ uint32_t seqno, flush_domains;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
+ DRM_MEM_DRIVER);
+ object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
+ DRM_MEM_DRIVER);
+ if (exec_list == NULL || object_list == NULL) {
+ DRM_ERROR("Failed to allocate exec or object list "
+ "for %d buffers\n",
+ args->buffer_count);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ goto pre_mutex_err;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ if (dev_priv->mm.wedged) {
+ DRM_ERROR("Execbuf while wedged\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EIO;
+ }
+
+ if (dev_priv->mm.suspended) {
+ DRM_ERROR("Execbuf while VT-switched.\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EBUSY;
+ }
+
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as each object is bound to the
+ * gtt
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+
+ /* Look up object handles and perform the relocations */
+ for (i = 0; i < args->buffer_count; i++) {
+ object_list[i] = drm_gem_object_lookup(dev, file_priv,
+ exec_list[i].handle);
+ if (object_list[i] == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec_list[i].handle, i);
+ ret = -EBADF;
+ goto err;
+ }
+
+ object_list[i]->pending_read_domains = 0;
+ object_list[i]->pending_write_domain = 0;
+ ret = i915_gem_object_pin_and_relocate(object_list[i],
+ file_priv,
+ &exec_list[i]);
+ if (ret) {
+ DRM_ERROR("object bind and relocate failed %d\n", ret);
+ goto err;
+ }
+ pinned = i + 1;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ batch_obj = object_list[args->buffer_count-1];
+ batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+ batch_obj->pending_write_domain = 0;
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ if (obj_priv->gtt_space == NULL) {
+ /* We evicted the buffer in the process of validating
+ * our set of buffers. We could try to recover by
+ * kicking everything out and trying again from
+ * the start.
+ */
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* make sure all previous memory operations have passed */
+ ret = i915_gem_object_set_domain(obj,
+ obj->pending_read_domains,
+ obj->pending_write_domain);
+ if (ret)
+ goto err;
+ }
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ /* Flush/invalidate caches and chipset buffer */
+ flush_domains = i915_gem_dev_set_domain(dev);
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+#if WATCH_COHERENCY
+ for (i = 0; i < args->buffer_count; i++) {
+ i915_gem_object_check_coherency(object_list[i],
+ exec_list[i].handle);
+ }
+#endif
+
+ exec_offset = exec_list[args->buffer_count - 1].offset;
+
+#if WATCH_EXEC
+ i915_gem_dump_object(object_list[args->buffer_count - 1],
+ args->batch_len,
+ __func__,
+ ~0);
+#endif
+
+ /* Exec the batchbuffer */
+ ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+ if (ret) {
+ DRM_ERROR("dispatch failed %d\n", ret);
+ goto err;
+ }
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires
+ */
+ flush_domains |= i915_retire_commands(dev);
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ /*
+ * Get a seqno representing the execution of the current buffer,
+ * which we can wait on. We would like to mitigate these interrupts,
+ * likely by only creating seqnos occasionally (so that we have
+ * *some* interrupts representing completion of buffers that we can
+ * wait on when trying to clear up gtt space).
+ */
+ seqno = i915_add_request(dev, flush_domains);
+ BUG_ON(seqno == 0);
+ i915_file_priv->mm.last_gem_seqno = seqno;
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ i915_gem_object_move_to_active(obj);
+ obj_priv->last_rendering_seqno = seqno;
+#if WATCH_LRU
+ DRM_INFO("%s: move to exec list %p\n", __func__, obj);
+#endif
+ }
+#if WATCH_LRU
+ i915_dump_lru(dev, __func__);
+#endif
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret)
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+err:
+ if (object_list != NULL) {
+ for (i = 0; i < pinned; i++)
+ i915_gem_object_unpin(object_list[i]);
+
+ for (i = 0; i < args->buffer_count; i++)
+ drm_gem_object_unreference(object_list[i]);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+ drm_free(object_list, sizeof(*object_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+ drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
+ DRM_MEM_DRIVER);
+
+ return ret;
+}
+
+int
+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret;
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+ if (obj_priv->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, alignment);
+ if (ret != 0) {
+ DRM_ERROR("Failure to bind: %d", ret);
+ return ret;
+ }
+ }
+ obj_priv->pin_count++;
+
+ /* If the object is not active and not pending a flush,
+ * remove it from the inactive list
+ */
+ if (obj_priv->pin_count == 1) {
+ atomic_inc(&dev->pin_count);
+ atomic_add(obj->size, &dev->pin_memory);
+ if (!obj_priv->active &&
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) == 0 &&
+ !list_empty(&obj_priv->list))
+ list_del_init(&obj_priv->list);
+ }
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+
+ return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+ obj_priv->pin_count--;
+ BUG_ON(obj_priv->pin_count < 0);
+ BUG_ON(obj_priv->gtt_space == NULL);
+
+ /* If the object is no longer pinned, and is
+ * neither active nor being flushed, then stick it on
+ * the inactive list
+ */
+ if (obj_priv->pin_count == 0) {
+ if (!obj_priv->active &&
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) == 0)
+ list_move_tail(&obj_priv->list,
+ &dev_priv->mm.inactive_list);
+ atomic_dec(&dev->pin_count);
+ atomic_sub(obj->size, &dev->pin_memory);
+ }
+ i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
+int
+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pin *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBADF;
+ }
+ obj_priv = obj->driver_private;
+
+ ret = i915_gem_object_pin(obj, args->alignment);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ /* XXX - flush the CPU caches for pinned objects
+ * as the X server doesn't manage domains yet
+ */
+ if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ obj->write_domain = 0;
+ }
+ args->offset = obj_priv->gtt_offset;
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int
+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pin *args = data;
+ struct drm_gem_object *obj;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBADF;
+ }
+
+ i915_gem_object_unpin(obj);
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_busy *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL) {
+ DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+ args->handle);
+ mutex_unlock(&dev->struct_mutex);
+ return -EBADF;
+ }
+
+ obj_priv = obj->driver_private;
+ args->busy = obj_priv->active;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return i915_gem_ring_throttle(dev, file_priv);
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv;
+
+ obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+ if (obj_priv == NULL)
+ return -ENOMEM;
+
+ /*
+ * We've just allocated pages from the kernel,
+ * so they've just been written by the CPU with
+ * zeros. They'll need to be clflushed before we
+ * use them with the GPU.
+ */
+ obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->read_domains = I915_GEM_DOMAIN_CPU;
+
+ obj->driver_private = obj_priv;
+ obj_priv->obj = obj;
+ INIT_LIST_HEAD(&obj_priv->list);
+ return 0;
+}
+
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ while (obj_priv->pin_count > 0)
+ i915_gem_object_unpin(obj);
+
+ i915_gem_object_unbind(obj);
+
+ drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+ drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+}
+
+int
+i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+ uint32_t flush_domains;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
+ if (ret)
+ return ret;
+ flush_domains = i915_gem_dev_set_domain(obj->dev);
+
+ if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
+ (void) i915_add_request(dev, flush_domains);
+
+ return 0;
+}
+
+/** Unbinds all objects that are on the given buffer list. */
+static int
+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+{
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ while (!list_empty(head)) {
+ obj_priv = list_first_entry(head,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
+
+ if (obj_priv->pin_count != 0) {
+ DRM_ERROR("Pinned object in unbind list\n");
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret != 0) {
+ DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
+ ret);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_idle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t seqno, cur_seqno, last_seqno;
+ int stuck;
+
+ if (dev_priv->mm.suspended)
+ return 0;
+
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ */
+ dev_priv->mm.suspended = 1;
+
+ i915_kernel_lost_context(dev);
+
+ /* Flush the GPU along with all non-CPU write domains
+ */
+ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
+ ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT));
+
+ if (seqno == 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ last_seqno = 0;
+ stuck = 0;
+ for (;;) {
+ cur_seqno = i915_get_gem_seqno(dev);
+ if (i915_seqno_passed(cur_seqno, seqno))
+ break;
+ if (last_seqno == cur_seqno) {
+ if (stuck++ > 100) {
+ DRM_ERROR("hardware wedged\n");
+ dev_priv->mm.wedged = 1;
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ break;
+ }
+ }
+ msleep(10);
+ last_seqno = cur_seqno;
+ }
+ dev_priv->mm.waiting_gem_seqno = 0;
+
+ i915_gem_retire_requests(dev);
+
+ /* Active and flushing should now be empty as we've
+ * waited for a sequence higher than any pending execbuffer
+ */
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+ /* Request should now be empty as we've also waited
+ * for the last request in the list
+ */
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+ /* Move all buffers out of the GTT. */
+ i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ return 0;
+}
+
+static int
+i915_gem_init_hws(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ /* If we need a physical address for the status page, it's already
+ * initialized at driver load time.
+ */
+ if (!I915_NEED_GFX_HWS(dev))
+ return 0;
+
+ obj = drm_gem_object_alloc(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ return -ENOMEM;
+ }
+ obj_priv = obj->driver_private;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ return ret;
+ }
+
+ dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
+ dev_priv->hws_map.size = 4096;
+ dev_priv->hws_map.type = 0;
+ dev_priv->hws_map.flags = 0;
+ dev_priv->hws_map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_map.handle == NULL) {
+ DRM_ERROR("Failed to map status page.\n");
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+ dev_priv->hws_obj = obj;
+ dev_priv->hw_status_page = dev_priv->hws_map.handle;
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+
+ return 0;
+}
+
+static int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ ret = i915_gem_init_hws(dev);
+ if (ret != 0)
+ return ret;
+
+ obj = drm_gem_object_alloc(dev, 128 * 1024);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ return -ENOMEM;
+ }
+ obj_priv = obj->driver_private;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ return ret;
+ }
+
+ /* Set up the kernel mapping for the ring. */
+ dev_priv->ring.Size = obj->size;
+ dev_priv->ring.tail_mask = obj->size - 1;
+
+ dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
+ dev_priv->ring.map.size = obj->size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->ring.map, dev);
+ if (dev_priv->ring.map.handle == NULL) {
+ DRM_ERROR("Failed to map ringbuffer.\n");
+ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+ dev_priv->ring.ring_obj = obj;
+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+ /* Stop the ring if it's running. */
+ I915_WRITE(PRB0_CTL, 0);
+ I915_WRITE(PRB0_HEAD, 0);
+ I915_WRITE(PRB0_TAIL, 0);
+ I915_WRITE(PRB0_START, 0);
+
+ /* Initialize the ring. */
+ I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+ I915_WRITE(PRB0_CTL,
+ ((obj->size - 4096) & RING_NR_PAGES) |
+ RING_NO_REPORT |
+ RING_VALID);
+
+ /* Update our cache of the ring state */
+ i915_kernel_lost_context(dev);
+
+ return 0;
+}
+
+static void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->ring.ring_obj == NULL)
+ return;
+
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+
+ i915_gem_object_unpin(dev_priv->ring.ring_obj);
+ drm_gem_object_unreference(dev_priv->ring.ring_obj);
+ dev_priv->ring.ring_obj = NULL;
+ memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+
+ if (dev_priv->hws_obj != NULL) {
+ i915_gem_object_unpin(dev_priv->hws_obj);
+ drm_gem_object_unreference(dev_priv->hws_obj);
+ dev_priv->hws_obj = NULL;
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+
+ /* Write high address into HWS_PGA when disabling. */
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+ }
+}
+
+int
+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv->mm.wedged) {
+ DRM_ERROR("Reenabling wedged hardware, good luck\n");
+ dev_priv->mm.wedged = 0;
+ }
+
+ ret = i915_gem_init_ringbuffer(dev);
+ if (ret != 0)
+ return ret;
+
+ mutex_lock(&dev->struct_mutex);
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+ BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ BUG_ON(!list_empty(&dev_priv->mm.request_list));
+ dev_priv->mm.suspended = 0;
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_idle(dev);
+ if (ret == 0)
+ i915_gem_cleanup_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+ int ret;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (dev_priv->ring.ring_obj != NULL) {
+ ret = i915_gem_idle(dev);
+ if (ret)
+ DRM_ERROR("failed to idle hardware: %d\n", ret);
+
+ i915_gem_cleanup_ringbuffer(dev);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
+void i915_gem_load(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ i915_gem_retire_work_handler);
+ dev_priv->mm.next_gem_seqno = 1;
+
+ i915_gem_detect_bit_6_swizzle(dev);
+}
diff --git a/linux-core/i915_gem_debug.c b/linux-core/i915_gem_debug.c
new file mode 100644
index 00000000..a2d6f289
--- /dev/null
+++ b/linux-core/i915_gem_debug.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_compat.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#if WATCH_INACTIVE
+void
+i915_verify_inactive(struct drm_device *dev, char *file, int line)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ obj = obj_priv->obj;
+ if (obj_priv->pin_count || obj_priv->active ||
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)))
+ DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
+ obj,
+ obj_priv->pin_count, obj_priv->active,
+ obj->write_domain, file, line);
+ }
+}
+#endif /* WATCH_INACTIVE */
+
+
+#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+static void
+i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
+ uint32_t bias, uint32_t mark)
+{
+ uint32_t *mem = kmap_atomic(page, KM_USER0);
+ int i;
+ for (i = start; i < end; i += 4)
+ DRM_INFO("%08x: %08x%s\n",
+ (int) (bias + i), mem[i / 4],
+ (bias + i == mark) ? " ********" : "");
+ kunmap_atomic(mem, KM_USER0);
+ /* give syslog time to catch up */
+ msleep(1);
+}
+
+void
+i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page;
+
+ DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+ for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
+ int page_len, chunk, chunk_len;
+
+ page_len = len - page * PAGE_SIZE;
+ if (page_len > PAGE_SIZE)
+ page_len = PAGE_SIZE;
+
+ for (chunk = 0; chunk < page_len; chunk += 128) {
+ chunk_len = page_len - chunk;
+ if (chunk_len > 128)
+ chunk_len = 128;
+ i915_gem_dump_page(obj_priv->page_list[page],
+ chunk, chunk + chunk_len,
+ obj_priv->gtt_offset +
+ page * PAGE_SIZE,
+ mark);
+ }
+ }
+}
+#endif
+
+#if WATCH_LRU
+void
+i915_dump_lru(struct drm_device *dev, const char *where)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+
+ DRM_INFO("active list %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+ list)
+ {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+ DRM_INFO("flushing list %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+ list)
+ {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+ DRM_INFO("inactive %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+}
+#endif
+
+
+#if WATCH_COHERENCY
+void
+i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page;
+ uint32_t *gtt_mapping;
+ uint32_t *backing_map = NULL;
+ int bad_count = 0;
+
+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+ __func__, obj, obj_priv->gtt_offset, handle,
+ obj->size / 1024);
+
+ gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
+ obj->size);
+ if (gtt_mapping == NULL) {
+ DRM_ERROR("failed to map GTT space\n");
+ return;
+ }
+
+ for (page = 0; page < obj->size / PAGE_SIZE; page++) {
+ int i;
+
+ backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+
+ if (backing_map == NULL) {
+ DRM_ERROR("failed to map backing page\n");
+ goto out;
+ }
+
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ uint32_t cpuval = backing_map[i];
+ uint32_t gttval = readl(gtt_mapping +
+ page * 1024 + i);
+
+ if (cpuval != gttval) {
+ DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
+ "0x%08x vs 0x%08x\n",
+ (int)(obj_priv->gtt_offset +
+ page * PAGE_SIZE + i * 4),
+ cpuval, gttval);
+ if (bad_count++ >= 8) {
+ DRM_INFO("...\n");
+ goto out;
+ }
+ }
+ }
+ kunmap_atomic(backing_map, KM_USER0);
+ backing_map = NULL;
+ }
+
+ out:
+ if (backing_map != NULL)
+ kunmap_atomic(backing_map, KM_USER0);
+ iounmap(gtt_mapping);
+
+ /* give syslog time to catch up */
+ msleep(1);
+
+ /* Directly flush the object, since we just loaded values with the CPU
+ * from the backing pages and we don't want to disturb the cache
+ * management that we're trying to observe.
+ */
+
+ i915_gem_clflush_object(obj);
+}
+#endif
diff --git a/linux-core/i915_gem_proc.c b/linux-core/i915_gem_proc.c
new file mode 100644
index 00000000..132eb3d1
--- /dev/null
+++ b/linux-core/i915_gem_proc.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_compat.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static int i915_gem_active_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Active:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n",
+ obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Flushing:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Inactive:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_request_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *gem_request;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Request:\n");
+ list_for_each_entry(gem_request, &dev_priv->mm.request_list,
+ list)
+ {
+ DRM_PROC_PRINT(" %d @ %d %08x\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies),
+ gem_request->flush_domains);
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+ DRM_PROC_PRINT("Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+
+static int i915_interrupt_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Interrupt enable: %08x\n",
+ I915_READ(IER));
+ DRM_PROC_PRINT("Interrupt identity: %08x\n",
+ I915_READ(IIR));
+ DRM_PROC_PRINT("Interrupt mask: %08x\n",
+ I915_READ(IMR));
+ DRM_PROC_PRINT("Pipe A stat: %08x\n",
+ I915_READ(PIPEASTAT));
+ DRM_PROC_PRINT("Pipe B stat: %08x\n",
+ I915_READ(PIPEBSTAT));
+ DRM_PROC_PRINT("Interrupts received: %d\n",
+ atomic_read(&dev_priv->irq_received));
+ DRM_PROC_PRINT("Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ DRM_PROC_PRINT("Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ DRM_PROC_PRINT("IRQ sequence: %d\n",
+ dev_priv->mm.irq_gem_seqno);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static struct drm_proc_list {
+ /** file name */
+ const char *name;
+ /** proc callback */
+ int (*f) (char *, char **, off_t, int, int *, void *);
+} i915_gem_proc_list[] = {
+ {"i915_gem_active", i915_gem_active_info},
+ {"i915_gem_flushing", i915_gem_flushing_info},
+ {"i915_gem_inactive", i915_gem_inactive_info},
+ {"i915_gem_request", i915_gem_request_info},
+ {"i915_gem_seqno", i915_gem_seqno_info},
+ {"i915_gem_interrupt", i915_interrupt_info},
+};
+
+#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
+
+int i915_gem_proc_init(struct drm_minor *minor)
+{
+ struct proc_dir_entry *ent;
+ int i, j;
+
+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
+ ent = create_proc_entry(i915_gem_proc_list[i].name,
+ S_IFREG | S_IRUGO, minor->dev_root);
+ if (!ent) {
+ DRM_ERROR("Cannot create /proc/dri/.../%s\n",
+ i915_gem_proc_list[i].name);
+ for (j = 0; j < i; j++)
+ remove_proc_entry(i915_gem_proc_list[j].name,
+ minor->dev_root);
+ return -1;
+ }
+ ent->read_proc = i915_gem_proc_list[i].f;
+ ent->data = minor;
+ }
+ return 0;
+}
+
+void i915_gem_proc_cleanup(struct drm_minor *minor)
+{
+ int i;
+
+ if (!minor->dev_root)
+ return;
+
+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
+ remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
+}
diff --git a/linux-core/i915_gem_tiling.c b/linux-core/i915_gem_tiling.c
new file mode 100644
index 00000000..a4ff736f
--- /dev/null
+++ b/linux-core/i915_gem_tiling.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
+
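+/*
+ * Worked example (a sketch for illustration only, not part of the driver):
+ * the cumulative bit 6 swizzle described above, written as a CPU-side
+ * address transform for the common 9/10 (and optional 11) case.  The
+ * helper name and the "xor_bit_11" flag are hypothetical; the driver
+ * itself only reports one of the I915_BIT_6_SWIZZLE_* values below.
+ *
+ *	static unsigned long swizzle_bit_6(unsigned long addr, int xor_bit_11)
+ *	{
+ *		unsigned long bit6 = (addr >> 6) ^ (addr >> 9) ^ (addr >> 10);
+ *
+ *		if (xor_bit_11)
+ *			bit6 ^= addr >> 11;
+ *
+ *		return (addr & ~(1UL << 6)) | ((bit6 & 1) << 6);
+ *	}
+ *
+ * Userspace applies this transform when reading or writing a tiled object
+ * with the CPU so that its view of the data matches the GPU's.
+ */
+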
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ int mchbar_offset;
+ char __iomem *mchbar;
+ int ret;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ if (bridge == NULL) {
+ DRM_ERROR("Couldn't get bridge device\n");
+ return;
+ }
+
+ ret = pci_enable_device(bridge);
+ if (ret != 0) {
+ DRM_ERROR("pci_enable_device failed: %d\n", ret);
+ return;
+ }
+
+ if (IS_I965G(dev))
+ mchbar_offset = 0x48;
+ else
+ mchbar_offset = 0x44;
+
+ /* Use resource 2 for our BAR that's stashed in a nonstandard location,
+ * since the bridge would only ever use standard BARs 0-1 (though it
+ * doesn't anyway)
+ */
+ ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
+ if (ret != 0) {
+ DRM_ERROR("pci_read_base failed: %d\n", ret);
+ return;
+ }
+
+ mchbar = ioremap(pci_resource_start(bridge, 2),
+ pci_resource_len(bridge, 2));
+ if (mchbar == NULL) {
+ DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
+ return;
+ }
+
+ if (IS_I965G(dev) && !IS_I965GM(dev)) {
+ uint32_t chdecmisc;
+
+ /* On the 965, channel interleave appears to be determined by
+ * the flex bit. If flex is set, then the ranks (sides of a
+ * DIMM) of memory will be "stacked" (physical addresses walk
+ * through one rank then move on to the next, flipping channels
+ * or not depending on rank configuration). The GPU in this
+ * case does exactly the same addressing as the CPU.
+ *
+ * Unlike the 945, channel randomization does not
+ * appear to be available.
+ *
+ * XXX: While the G965 doesn't appear to do any interleaving
+ * when the DIMMs are not exactly matched, the G4x chipsets
+ * might be for "L-shaped" configurations, and will need to be
+ * detected.
+ *
+ * L-shaped configuration:
+ *
+ * +-----+
+ * | |
+ * |DIMM2| <-- non-interleaved
+ * +-----+
+ * +-----+ +-----+
+ * | | | |
+ * |DIMM0| |DIMM1| <-- interleaved area
+ * +-----+ +-----+
+ */
+ chdecmisc = readb(mchbar + CHDECMISC);
+
+ if (chdecmisc == 0xff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ } else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ } else if (IS_I9XX(dev)) {
+ uint32_t dcc;
+
+ /* On 915-945 and GM965, channel interleave by the CPU is
+ * determined by DCC. The CPU will alternate based on bit 6
+ * in interleaved mode, and the GPU will then also alternate
+ * on bit 6, 9, and 10 for X, but the CPU may also optionally
+ * alternate based on bit 17 (XOR not disabled and XOR
+ * bit == 17).
+ */
+ dcc = readl(mchbar + DCC);
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (IS_I915G(dev) || IS_I915GM(dev) ||
+ dcc & DCC_CHANNEL_XOR_DISABLE) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (IS_I965GM(dev)) {
+ /* GM965 only does bit 11-based channel
+ * randomization
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 or perhaps other swizzling */
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ break;
+ }
+ if (dcc == 0xffffffff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ } else {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+
+ iounmap(mchbar);
+
+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+int
+i915_gem_set_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_set_tiling *args = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+ obj_priv = obj->driver_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (args->tiling_mode == I915_TILING_NONE) {
+ obj_priv->tiling_mode = I915_TILING_NONE;
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ if (args->tiling_mode == I915_TILING_X)
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ else
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ /* If we can't handle the swizzling, make it untiled. */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ args->tiling_mode = I915_TILING_NONE;
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ }
+ }
+ obj_priv->tiling_mode = args->tiling_mode;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_gem_object_unreference(obj);
+
+ return 0;
+}
+
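+/*
+ * Illustrative userspace sketch of the ioctl above (assumptions: the
+ * DRM_IOCTL_I915_GEM_SET_TILING wrapper comes from i915_drm.h, "fd" is an
+ * open DRM file descriptor, "handle" is a GEM handle, and apply_swizzle()
+ * is a hypothetical client helper):
+ *
+ *	struct drm_i915_gem_set_tiling set = { 0 };
+ *
+ *	set.handle = handle;
+ *	set.tiling_mode = I915_TILING_X;
+ *	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) == 0)
+ *		apply_swizzle(set.tiling_mode, set.swizzle_mode);
+ *
+ * The kernel may demote the request to I915_TILING_NONE when the swizzle
+ * cannot be handled, so the client must honour the returned tiling_mode.
+ */
+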
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+int
+i915_gem_get_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_get_tiling *args = data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+ obj_priv = obj->driver_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ args->tiling_mode = obj_priv->tiling_mode;
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_X:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ break;
+ case I915_TILING_NONE:
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ default:
+ DRM_ERROR("unknown tiling mode\n");
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_gem_object_unreference(obj);
+
+ return 0;
+}
diff --git a/linux-core/i915_opregion.c b/linux-core/i915_opregion.c
index fdd4987e..1fa599ea 100644
--- a/linux-core/i915_opregion.c
+++ b/linux-core/i915_opregion.c
@@ -250,19 +250,20 @@ void opregion_enable_asle(struct drm_device *dev)
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
if (IS_MOBILE(dev)) {
+ u32 pipeb_stats = I915_READ(PIPEBSTAT);
/* Some hardware uses the legacy backlight controller
to signal interrupts, so we need to set up pipe B
to generate an IRQ on writes */
- I915_WRITE(PIPEBSTAT, pipeb_stats |=
- I915_LEGACY_BLC_EVENT_ENABLE);
- dev_priv->irq_enable_reg |=
- (I915_ASLE_INTERRUPT
- | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
- } else
- dev_priv->irq_enable_reg |= I915_ASLE_INTERRUPT;
-
+ pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE;
+ I915_WRITE(PIPEBSTAT, pipeb_stats);
+
+ dev_priv->irq_mask_reg &=
+ ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ }
+
+ dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT;
+
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
asle->ardy = 1;