Diffstat (limited to 'linux-core')
37 files changed, 4688 insertions, 296 deletions
diff --git a/linux-core/Makefile b/linux-core/Makefile index 9b288851..1790bdb0 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -117,7 +117,7 @@ V := $(shell if [ -f $(BOOTVERSION_PREFIX)version.h ]; then \ ifeq ($(V),"$(RUNNING_REL)") HEADERFROMBOOT := 1 -GETCONFIG := MAKEFILES=$(shell pwd)/.config +GETCONFIG := MAKEFILES=$(shell /bin/pwd)/.config HAVECONFIG := y endif @@ -164,7 +164,7 @@ endif all: modules modules: includes - +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules + +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules ifeq ($(HEADERFROMBOOT),1) @@ -240,7 +240,7 @@ drmstat: drmstat.c $(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS) install: - make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules_install + make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`/bin/pwd` DRMSRCDIR=`/bin/pwd` modules_install else diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 246c0b35..768cd22a 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -12,16 +12,16 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ drm_memory_debug.o ati_pcigart.o drm_sman.o \ - drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ + drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \ drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_crtc.o \ drm_edid.o drm_modes.o drm_bo_lock.o drm_regman.o \ - drm_vm_nopage_compat.o drm_crtc_helper.o + drm_vm_nopage_compat.o drm_crtc_helper.o drm_gem.o tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o i810-objs := i810_drv.o i810_dma.o i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ - i915_buffer.o i915_execbuf.o \ + i915_buffer.o i915_execbuf.o i915_gem.o \ intel_display.o intel_crt.o intel_lvds.o intel_bios.o \ intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \ intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \ diff --git a/linux-core/drm-gem.txt b/linux-core/drm-gem.txt new file mode 100644 index 00000000..5cda87f8 --- /dev/null +++ b/linux-core/drm-gem.txt @@ -0,0 +1,805 @@ + The Graphics Execution Manager + Part of the Direct Rendering Manager + ============================== + + Keith Packard <keithp@keithp.com> + Eric Anholt <eric@anholt.net> + 2008-5-9 + +Contents: + + 1. GEM Overview + 2. API overview and conventions + 3. Object Creation/Destruction + 4. Reading/writing contents + 5. Mapping objects to userspace + 6. Memory Domains + 7. Execution (Intel specific) + 8. Other misc Intel-specific functions + +1. Graphics Execution Manager Overview + +Gem is designed to manage graphics memory, control access to the graphics +device execution context and handle the essentially NUMA environment unique +to modern graphics hardware. Gem allows multiple applications to share +graphics device resources without the need to constantly reload the entire +graphics card. Data may be shared between multiple applications with gem +ensuring that the correct memory synchronization occurs. + +Graphics data can consume arbitrary amounts of memory, with 3D applications +constructing ever larger sets of textures and vertices. 
With graphics cards +memory space growing larger every year, and graphics APIs growing more +complex, we can no longer insist that each application save a complete copy +of their graphics state so that the card can be re-initialized from user +space at each context switch. Ensuring that graphics data remains persistent +across context switches allows applications significant new functionality +while also improving performance for existing APIs. + +Modern linux desktops include significant 3D rendering as a fundemental +component of the desktop image construction process. 2D and 3D applications +paint their content to offscreen storage and the central 'compositing +manager' constructs the final screen image from those window contents. This +means that pixel image data from these applications must move within reach +of the compositing manager and used as source operands for screen image +rendering operations. + +Gem provides simple mechanisms to manage graphics data and control execution +flow within the linux operating system. Using many existing kernel +subsystems, it does this with a modest amount of code. + +2. API Overview and Conventions + +All APIs here are defined in terms of ioctls appplied to the DRM file +descriptor. To create and manipulate objects, an application must be +'authorized' using the DRI or DRI2 protocols with the X server. To relax +that, we will need to implement some better access control mechanisms within +the hardware portion of the driver to prevent inappropriate +cross-application data access. + +Any DRM driver which does not support GEM will return -ENODEV for all of +these ioctls. Invalid object handles return -EINVAL. Invalid object names +return -ENOENT. Other errors are as documented in the specific API below. + +To avoid the need to translate ioctl contents on mixed-size systems (with +32-bit user space running on a 64-bit kernel), the ioctl data structures +contain explicitly sized objects, using 64-bits for all size and pointer +data and 32-bits for identifiers. In addition, the 64-bit objects are all +carefully aligned on 64-bit boundaries. Because of this, all pointers in the +ioctl data structures are passed as uint64_t values. Suitable casts will +be necessary. + +One significant operation which is explicitly left out of this API is object +locking. Applications are expected to perform locking of shared objects +outside of the GEM api. This kind of locking is not necessary to safely +manipulate the graphics engine, and with multiple objects interacting in +unknown ways, per-object locking would likely introduce all kinds of +lock-order issues. Punting this to the application seems like the only +sensible plan. Given that DRM already offers a global lock on the hardware, +this doesn't change the current situation. + +3. Object Creation and Destruction + +Gem provides explicit memory management primitives. System pages are +allocated when the object is created, either as the fundemental storage for +hardware where system memory is used by the graphics processor directly, or +as backing store for graphics-processor resident memory. + +Objects are referenced from user space using handles. These are, for all +intents and purposes, equivalent to file descriptors. We could simply use +file descriptors were it not for the small limit (1024) of file descriptors +available to applications, and for the fact that the X server (a rather +significant user of this API) uses 'select' and has a limited maximum file +descriptor for that operation. 
Given the ability to allocate more file +descriptors, and given the ability to place these 'higher' in the file +descriptor space, we'd love to simply use file descriptors. + +Objects may be published with a name so that other applications can access +them. The name remains valid as long as the object exists. Right now, our +DRI APIs use 32-bit integer names, so that's what we expose here + + A. Creation + + struct drm_gem_create { + /** + * Requested size for the object. + * + * The (page-aligned) allocated size for the object + * will be returned. + */ + uint64_t size; + /** + * Returned handle for the object. + * + * Object handles are nonzero. + */ + uint32_t handle; + uint32_t pad; + }; + + /* usage */ + create.size = 16384; + ret = ioctl (fd, DRM_IOCTL_GEM_CREATE, &create); + if (ret == 0) + return create.handle; + + Note that the size is rounded up to a page boundary, and that + the rounded-up size is returned in 'size'. No name is assigned to + this object, making it local to this process. + + If insufficient memory is availabe, -ENOMEM will be returned. + + B. Closing + + struct drm_gem_close { + /** Handle of the object to be closed. */ + uint32_t handle; + uint32_t pad; + }; + + + /* usage */ + close.handle = <handle>; + ret = ioctl (fd, DRM_IOCTL_GEM_CLOSE, &close); + + This call makes the specified handle invalid, and if no other + applications are using the object, any necessary graphics hardware + synchronization is performed and the resources used by the object + released. + + C. Naming + + struct drm_gem_flink { + /** Handle for the object being named */ + uint32_t handle; + + /** Returned global name */ + uint32_t name; + }; + + /* usage */ + flink.handle = <handle>; + ret = ioctl (fd, DRM_IOCTL_GEM_FLINK, &flink); + if (ret == 0) + return flink.name; + + Flink creates a name for the object and returns it to the + application. This name can be used by other applications to gain + access to the same object. + + D. Opening by name + + struct drm_gem_open { + /** Name of object being opened */ + uint32_t name; + + /** Returned handle for the object */ + uint32_t handle; + + /** Returned size of the object */ + uint64_t size; + }; + + /* usage */ + open.name = <name>; + ret = ioctl (fd, DRM_IOCTL_GEM_OPEN, &open); + if (ret == 0) { + *sizep = open.size; + return open.handle; + } + + Open accesses an existing object and returns a handle for it. If the + object doesn't exist, -ENOENT is returned. The size of the object is + also returned. This handle has all the same capabilities as the + handle used to create the object. In particular, the object is not + destroyed until all handles are closed. + +4. Basic read/write operations + +By default, gem objects are not mapped to the applications address space, +getting data in and out of them is done with I/O operations instead. This +allows the data to reside in otherwise unmapped pages, including pages in +video memory on an attached discrete graphics card. In addition, using +explicit I/O operations allows better control over cache contents, as +graphics devices are generally not cache coherent with the CPU, mapping +pages used for graphics into an application address space requires the use +of expensive cache flushing operations. Providing direct control over +graphics data access ensures that data are handled in the most efficient +possible fashion. + + A. Reading + + struct drm_gem_pread { + /** Handle for the object being read. 
*/ + uint32_t handle; + uint32_t pad; + /** Offset into the object to read from */ + uint64_t offset; + /** Length of data to read */ + uint64_t size; + /** Pointer to write the data into. */ + uint64_t data_ptr; /* void * */ + }; + + This copies data into the specified object at the specified + position. Any necessary graphics device synchronization and + flushing will be done automatically. + + struct drm_gem_pwrite { + /** Handle for the object being written to. */ + uint32_t handle; + uint32_t pad; + /** Offset into the object to write to */ + uint64_t offset; + /** Length of data to write */ + uint64_t size; + /** Pointer to read the data from. */ + uint64_t data_ptr; /* void * */ + }; + + This copies data out of the specified object into the + waiting user memory. Again, device synchronization will + be handled by the kernel to ensure user space sees a + consistent view of the graphics device. + +5. Mapping objects to user space + +For most objects, reading/writing is the preferred interaction mode. +However, when the CPU is involved in rendering to cover deficiencies in +hardware support for particular operations, the CPU will want to directly +access the relevant objects. + +Because mmap is fairly heavyweight, we allow applications to retain maps to +objects persistently and then update how they're using the memory through a +separate interface. Applications which fail to use this separate interface +may exhibit unpredictable behaviour as memory consistency will not be +preserved. + + A. Mapping + + struct drm_gem_mmap { + /** Handle for the object being mapped. */ + uint32_t handle; + uint32_t pad; + /** Offset in the object to map. */ + uint64_t offset; + /** + * Length of data to map. + * + * The value will be page-aligned. + */ + uint64_t size; + /** Returned pointer the data was mapped at */ + uint64_t addr_ptr; /* void * */ + }; + + /* usage */ + mmap.handle = <handle>; + mmap.offset = <offset>; + mmap.size = <size>; + ret = ioctl (fd, DRM_IOCTL_GEM_MMAP, &mmap); + if (ret == 0) + return (void *) (uintptr_t) mmap.addr_ptr; + + + B. Unmapping + + munmap (addr, length); + + Nothing strange here, just use the normal munmap syscall. + +6. Memory Domains + +Graphics devices remain a strong bastion of non cache-coherent memory. As a +result, accessing data through one functional unit will end up loading that +cache with data which then needs to be manually synchronized when that data +is used with another functional unit. + +Tracking where data are resident is done by identifying how functional units +deal with caches. Each cache is labeled as a separate memory domain. Then, +each sequence of operations is expected to load data into various read +domains and leave data in at most one write domain. Gem tracks the read and +write memory domains of each object and performs the necessary +synchronization operations when objects move from one domain set to another. + +For example, if operation 'A' constructs an image that is immediately used +by operation 'B', then when the read domain for 'B' is not the same as the +write domain for 'A', then the write domain must be flushed, and the read +domain invalidated. If these two operations are both executed in the same +command queue, then the flush operation can go inbetween them in the same +queue, avoiding any kind of CPU-based synchronization and leaving the GPU to +do the work itself. + +6.1 Memory Domains (GPU-independent) + + * DRM_GEM_DOMAIN_CPU. + + Objects in this domain are using caches which are connected to the CPU. 
+ Moving objects from non-CPU domains into the CPU domain can involve waiting + for the GPU to finish with operations using this object. Moving objects + from this domain to a GPU domain can involve flushing CPU caches and chipset + buffers. + +6.1 GPU-independent memory domain ioctl + +This ioctl is independent of the GPU in use. So far, no use other than +synchronizing objects to the CPU domain have been found; if that turns out +to be generally true, this ioctl may be simplified further. + + A. Explicit domain control + + struct drm_gem_set_domain { + /** Handle for the object */ + uint32_t handle; + + /** New read domains */ + uint32_t read_domains; + + /** New write domain */ + uint32_t write_domain; + }; + + /* usage */ + set_domain.handle = <handle>; + set_domain.read_domains = <read_domains>; + set_domain.write_domain = <write_domain>; + ret = ioctl (fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain); + + When the application wants to explicitly manage memory domains for + an object, it can use this function. Usually, this is only used + when the application wants to synchronize object contents between + the GPU and CPU-based application rendering. In that case, + the <read_domains> would be set to DRM_GEM_DOMAIN_CPU, and if the + application were going to write to the object, the <write_domain> + would also be set to DRM_GEM_DOMAIN_CPU. After the call, gem + guarantees that all previous rendering operations involving this + object are complete. The application is then free to access the + object through the address returned by the mmap call. Afterwards, + when the application again uses the object through the GPU, any + necessary CPU flushing will occur and the object will be correctly + synchronized with the GPU. + + Note that this synchronization is not required for any accesses + going through the driver itself. The pread, pwrite and execbuffer + ioctls all perform the necessary domain management internally. + Explicit synchronization is only necessary when accessing the object + through the mmap'd address. + +7. Execution (Intel specific) + +Managing the command buffers is inherently chip-specific, so the core of gem +doesn't have any intrinsic functions. Rather, execution is left to the +device-specific portions of the driver. + +The Intel DRM_I915_GEM_EXECBUFFER ioctl takes a list of gem objects, all of +which are mapped to the graphics device. The last object in the list is the +command buffer. + +7.1. Relocations + +Command buffers often refer to other objects, and to allow the kernel driver +to move objects around, a sequence of relocations is associated with each +object. Device-specific relocation operations are used to place the +target-object relative value into the object. + +The Intel driver has a single relocation type: + + struct drm_i915_gem_relocation_entry { + /** + * Handle of the buffer being pointed to by this + * relocation entry. + * + * It's appealing to make this be an index into the + * mm_validate_entry list to refer to the buffer, + * but this allows the driver to create a relocation + * list for state buffers and not re-write it per + * exec using the buffer. + */ + uint32_t target_handle; + + /** + * Value to be added to the offset of the target + * buffer to make up the relocation entry. + */ + uint32_t delta; + + /** + * Offset in the buffer the relocation entry will be + * written into + */ + uint64_t offset; + + /** + * Offset value of the target buffer that the + * relocation entry was last written as. 
+ * + * If the buffer has the same offset as last time, we + * can skip syncing and writing the relocation. This + * value is written back out by the execbuffer ioctl + * when the relocation is written. + */ + uint64_t presumed_offset; + + /** + * Target memory domains read by this operation. + */ + uint32_t read_domains; + + /* + * Target memory domains written by this operation. + * + * Note that only one domain may be written by the + * whole execbuffer operation, so that where there are + * conflicts, the application will get -EINVAL back. + */ + uint32_t write_domain; + }; + + 'target_handle', the handle to the target object. This object must + be one of the objects listed in the execbuffer request or + bad things will happen. The kernel doesn't check for this. + + 'offset' is where, in the source object, the relocation data + are written. Each relocation value is a 32-bit value consisting + of the location of the target object in the GPU memory space plus + the 'delta' value included in the relocation. + + 'presumed_offset' is where user-space believes the target object + lies in GPU memory space. If this value matches where the object + actually is, then no relocation data are written, the kernel + assumes that user space has set up data in the source object + using this presumption. This offers a fairly important optimization + as writing relocation data requires mapping of the source object + into the kernel memory space. + + 'read_domains' and 'write_domains' list the usage by the source + object of the target object. The kernel unions all of the domain + information from all relocations in the execbuffer request. No more + than one write_domain is allowed, otherwise an EINVAL error is + returned. read_domains must contain write_domain. This domain + information is used to synchronize buffer contents as described + above in the section on domains. + +7.1.1 Memory Domains (Intel specific) + +The Intel GPU has several internal caches which are not coherent and hence +require explicit synchronization. Memory domains provide the necessary data +to synchronize what is needed while leaving other cache contents intact. + + * DRM_GEM_DOMAIN_I915_RENDER. + The GPU 3D and 2D rendering operations use a unified rendering cache, so + operations doing 3D painting and 2D blts will use this domain + + * DRM_GEM_DOMAIN_I915_SAMPLER + Textures are loaded by the sampler through a separate cache, so + any texture reading will use this domain. Note that the sampler + and renderer use different caches, so moving an object from render target + to texture source will require a domain transfer. + + * DRM_GEM_DOMAIN_I915_COMMAND + The command buffer doesn't have an explicit cache (although it does + read ahead quite a bit), so this domain just indicates that the object + needs to be flushed to the GPU. + + * DRM_GEM_DOMAIN_I915_INSTRUCTION + All of the programs on Gen4 and later chips use an instruction cache to + speed program execution. It must be explicitly flushed when new programs + are written to memory by the CPU. + + * DRM_GEM_DOMAIN_I915_VERTEX + Vertex data uses two different vertex caches, but they're + both flushed with the same instruction. + +7.2 Execution object list (Intel specific) + + struct drm_i915_gem_exec_object { + /** + * User's handle for a buffer to be bound into the GTT + * for this operation. 
+ */ + uint32_t handle; + + /** + * List of relocations to be performed on this buffer + */ + uint32_t relocation_count; + /* struct drm_i915_gem_relocation_entry *relocs */ + uint64_t relocs_ptr; + + /** + * Required alignment in graphics aperture + */ + uint64_t alignment; + + /** + * Returned value of the updated offset of the object, + * for future presumed_offset writes. + */ + uint64_t offset; + }; + + Each object involved in a particular execution operation must be + listed using one of these structures. + + 'handle' references the object. + + 'relocs_ptr' is a user-mode pointer to a array of 'relocation_count' + drm_i915_gem_relocation_entry structs (see above) that + define the relocations necessary in this buffer. Note that all + relocations must reference other exec_object structures in the same + execbuffer ioctl and that those other buffers must come earlier in + the exec_object array. In other words, the dependencies mapped by the + exec_object relocations must form a directed acyclic graph. + + 'alignment' is the byte alignment necessary for this buffer. Each + object has specific alignment requirements, as the kernel doesn't + know what each object is being used for, those requirements must be + provided by user mode. If an object is used in two different ways, + it's quite possible that the alignment requirements will differ. + + 'offset' is a return value, receiving the location of the object + during this execbuffer operation. The application should use this + as the presumed offset in future operations; if the object does not + move, then kernel need not write relocation data. + +7.3 Execbuffer ioctl (Intel specific) + + struct drm_i915_gem_execbuffer { + /** + * List of buffers to be validated with their + * relocations to be performend on them. + * + * These buffers must be listed in an order such that + * all relocations a buffer is performing refer to + * buffers that have already appeared in the validate + * list. + */ + /* struct drm_i915_gem_validate_entry *buffers */ + uint64_t buffers_ptr; + uint32_t buffer_count; + + /** + * Offset in the batchbuffer to start execution from. + */ + uint32_t batch_start_offset; + + /** + * Bytes used in batchbuffer from batch_start_offset + */ + uint32_t batch_len; + uint32_t DR1; + uint32_t DR4; + uint32_t num_cliprects; + uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */ + }; + + + 'buffers_ptr' is a user-mode pointer to an array of 'buffer_count' + drm_i915_gem_exec_object structures which contains the complete set + of objects required for this execbuffer operation. The last entry in + this array, the 'batch buffer', is the buffer of commands which will + be linked to the ring and executed. + + 'batch_start_offset' is the byte offset within the batch buffer which + contains the first command to execute. So far, we haven't found a + reason to use anything other than '0' here, but the thought was that + some space might be allocated for additional initialization which + could be skipped in some cases. This must be a multiple of 4. + + 'batch_len' is the length, in bytes, of the data to be executed + (i.e., the amount of data after batch_start_offset). This must + be a multiple of 4. + + 'num_cliprects' and 'cliprects_ptr' reference an array of + drm_clip_rect structures that is num_cliprects long. The entire + batch buffer will be executed multiple times, once for each + rectangle in this list. If num_cliprects is 0, then no clipping + rectangle will be set. 
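
	As a concrete illustration, a minimal usage sketch in the style of
	the earlier examples might look like the following. This is not part
	of the formal API description above: it assumes a single batch-buffer
	object with no relocations and no clip rectangles, and that the
	request code follows the DRM_IOCTL_I915_GEM_EXECBUFFER naming used by
	the other Intel ioctls in this document; values in angle brackets are
	application-supplied placeholders. DR1 and DR4, described next, are
	left at zero since no clipping is requested.

	/* usage sketch (single batch buffer, no relocations, no cliprects) */
	struct drm_i915_gem_exec_object exec_objects[1];
	struct drm_i915_gem_execbuffer execbuf;

	exec_objects[0].handle = <batch_handle>;
	exec_objects[0].relocation_count = 0;
	exec_objects[0].relocs_ptr = 0;
	exec_objects[0].alignment = 4096;
	exec_objects[0].offset = <presumed_offset>;

	execbuf.buffers_ptr = (uint64_t) (uintptr_t) exec_objects;
	execbuf.buffer_count = 1;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = <batch_len>;	/* must be a multiple of 4 */
	execbuf.DR1 = 0;
	execbuf.DR4 = 0;
	execbuf.num_cliprects = 0;
	execbuf.cliprects_ptr = 0;

	ret = ioctl (fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
	if (ret == 0) {
		/* exec_objects[0].offset now holds the object's GTT
		 * location; use it as presumed_offset next time. */
	}
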
+ + 'DR1' and 'DR4' are portions of the 3DSTATE_DRAWING_RECTANGLE + command which will be queued when this operation is clipped + (num_cliprects != 0). + + DR1 bit definition + 31 Fast Scissor Clip Disable (debug only). + Disables a hardware optimization that + improves performance. This should have + no visible effect, other than reducing + performance + + 30 Depth Buffer Coordinate Offset Disable. + This disables the addition of the + depth buffer offset bits which are used + to change the location of the depth buffer + relative to the front buffer. + + 27:26 X Dither Offset. Specifies the X pixel + offset to use when accessing the dither table + + 25:24 Y Dither Offset. Specifies the Y pixel + offset to use when accessing the dither + table. + + DR4 bit definition + 31:16 Drawing Rectangle Origin Y. Specifies the Y + origin of coordinates relative to the + draw buffer. + + 15:0 Drawing Rectangle Origin X. Specifies the X + origin of coordinates relative to the + draw buffer. + + As you can see, these two fields are necessary for correctly + offsetting drawing within a buffer which contains multiple surfaces. + Note that DR1 is only used on Gen3 and earlier hardware and that + newer hardware sticks the dither offset elsewhere. + +7.3.1 Detailed Execution Description + + Execution of a single batch buffer requires several preparatory + steps to make the objects visible to the graphics engine and resolve + relocations to account for their current addresses. + + A. Mapping and Relocation + + Each exec_object structure in the array is examined in turn. + + If the object is not already bound to the GTT, it is assigned a + location in the graphics address space. If no space is available in + the GTT, some other object will be evicted. This may require waiting + for previous execbuffer requests to complete before that object can + be unmapped. With the location assigned, the pages for the object + are pinned in memory using find_or_create_page and the GTT entries + updated to point at the relevant pages using drm_agp_bind_pages. + + Then the array of relocations is traversed. Each relocation record + looks up the target object and, if the presumed offset does not + match the current offset (remember that this buffer has already been + assigned an address as it must have been mapped earlier), the + relocation value is computed using the current offset. If the + object is currently in use by the graphics engine, writing the data + out must be preceeded by a delay while the object is still busy. + Once it is idle, then the page containing the relocation is mapped + by the CPU and the updated relocation data written out. + + The read_domains and write_domain entries in each relocation are + used to compute the new read_domains and write_domain values for the + target buffers. The actual execution of the domain changes must wait + until all of the exec_object entries have been evaluated as the + complete set of domain information will not be available until then. + + B. Memory Domain Resolution + + After all of the new memory domain data has been pulled out of the + relocations and computed for each object, the list of objects is + again traversed and the new memory domains compared against the + current memory domains. There are two basic operations involved here: + + * Flushing the current write domain. If the new read domains + are not equal to the current write domain, then the current + write domain must be flushed. Otherwise, reads will not see data + present in the write domain cache. 
In addition, any new read domains + other than the current write domain must be invalidated to ensure + that the flushed data are re-read into their caches. + + * Invaliding new read domains. Any domains which were not currently + used for this object must be invalidated as old objects which + were mapped at the same location may have stale data in the new + domain caches. + + If the CPU cache is being invalidated and some GPU cache is being + flushed, then we'll have to wait for rendering to complete so that + any pending GPU writes will be complete before we flush the GPU + cache. + + If the CPU cache is being flushed, then we use 'clflush' to get data + written from the CPU. + + Because the GPU caches cannot be partially flushed or invalidated, + we don't actually flush them during this traversal stage. Rather, we + gather the invalidate and flush bits up in the device structure. + + Once all of the object domain changes have been evaluated, then the + gathered invalidate and flush bits are examined. For any GPU flush + operations, we emit a single MI_FLUSH command that performs all of + the necessary flushes. We then look to see if the CPU cache was + flushed. If so, we use the chipset flush magic (writing to a special + page) to get the data out of the chipset and into memory. + + C. Queuing Batch Buffer to the Ring + + With all of the objects resident in graphics memory space, and all + of the caches prepared with appropriate data, the batch buffer + object can be queued to the ring. If there are clip rectangles, then + the buffer is queued once per rectangle, with suitable clipping + inserted into the ring just before the batch buffer. + + D. Creating an IRQ Cookie + + Right after the batch buffer is placed in the ring, a request to + generate an IRQ is added to the ring along with a command to write a + marker into memory. When the IRQ fires, the driver can look at the + memory location to see where in the ring the GPU has passed. This + magic cookie value is stored in each object used in this execbuffer + command; it is used whereever you saw 'wait for rendering' above in + this document. + + E. Writing back the new object offsets + + So that the application has a better idea what to use for + 'presumed_offset' values later, the current object offsets are + written back to the exec_object structures. + + +8. Other misc Intel-specific functions. + +To complete the driver, a few other functions were necessary. + +8.1 Initialization from the X server + +As the X server is currently responsible for apportioning memory between 2D +and 3D, it must tell the kernel which region of the GTT aperture is +available for 3D objects to be mapped into. + + struct drm_i915_gem_init { + /** + * Beginning offset in the GTT to be managed by the + * DRM memory manager. + */ + uint64_t gtt_start; + /** + * Ending offset in the GTT to be managed by the DRM + * memory manager. + */ + uint64_t gtt_end; + }; + /* usage */ + init.gtt_start = <gtt_start>; + init.gtt_end = <gtt_end>; + ret = ioctl (fd, DRM_IOCTL_I915_GEM_INIT, &init); + + The GTT aperture between gtt_start and gtt_end will be used to map + objects. This also tells the kernel that the ring can be used, + pulling the ring addresses from the device registers. + +8.2 Pinning objects in the GTT + +For scan-out buffers and the current shared depth and back buffers, we need +to have them always available in the GTT, at least for now. Pinning means to +lock their pages in memory along with keeping them at a fixed offset in the +graphics aperture. 
These operations are available only to root. + + struct drm_i915_gem_pin { + /** Handle of the buffer to be pinned. */ + uint32_t handle; + uint32_t pad; + + /** alignment required within the aperture */ + uint64_t alignment; + + /** Returned GTT offset of the buffer. */ + uint64_t offset; + }; + + /* usage */ + pin.handle = <handle>; + pin.alignment = <alignment>; + ret = ioctl (fd, DRM_IOCTL_I915_GEM_PIN, &pin); + if (ret == 0) + return pin.offset; + + Pinning an object ensures that it will not be evicted from the GTT + or moved. It will stay resident until destroyed or unpinned. + + struct drm_i915_gem_unpin { + /** Handle of the buffer to be unpinned. */ + uint32_t handle; + uint32_t pad; + }; + + /* usage */ + unpin.handle = <handle>; + ret = ioctl (fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin); + + Unpinning an object makes it possible to evict this object from the + GTT. It doesn't ensure that it will be evicted, just that it may. + diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 5b2d7829..708b4fa0 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -54,6 +54,8 @@ #include <linux/smp_lock.h> /* For (un)lock_kernel */ #include <linux/dma-mapping.h> #include <linux/mm.h> +#include <linux/swap.h> +#include <linux/kref.h> #include <linux/pagemap.h> #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) #include <linux/mutex.h> @@ -89,6 +91,10 @@ struct drm_device; struct drm_file; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) +typedef unsigned long uintptr_t; +#endif + /* If you want the memory alloc debug functionality, change define below */ /* #define DEBUG_MEMORY */ @@ -108,7 +114,7 @@ struct drm_file; #define DRIVER_DMA_QUEUE 0x100 #define DRIVER_FB_DMA 0x200 #define DRIVER_MODESET 0x400 - +#define DRIVER_GEM 0x800 /*@}*/ @@ -427,6 +433,11 @@ struct drm_file { struct list_head refd_objects; + /** Mapping of mm object handles to object pointers. */ + struct idr object_idr; + /** Lock for synchronization of access to object_idr. */ + spinlock_t table_lock; + struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; struct file *filp; void *driver_priv; @@ -469,6 +480,11 @@ struct drm_lock_data { uint32_t kernel_waiters; uint32_t user_waiters; int idle_has_lock; + /** + * Boolean signaling that the lock is held on behalf of the + * file_priv client by the kernel in an ioctl handler. + */ + int kernel_held; }; /** @@ -544,17 +560,17 @@ struct drm_sigdata { * Generic memory manager structs */ -struct drm_mm_node { +struct drm_memrange_node { struct list_head fl_entry; struct list_head ml_entry; int free; unsigned long start; unsigned long size; - struct drm_mm *mm; + struct drm_memrange *mm; void *private; }; -struct drm_mm { +struct drm_memrange { struct list_head fl_entry; struct list_head ml_entry; }; @@ -568,9 +584,9 @@ struct drm_map_list { struct drm_hash_item hash; struct drm_map *map; /**< mapping */ uint64_t user_token; - struct drm_mm_node *file_offset_node; struct drm_master *master; /** if this map is associated with a specific master */ + struct drm_memrange_node *file_offset_node; }; typedef struct drm_map drm_local_map_t; @@ -618,6 +634,56 @@ struct drm_ati_pcigart_info { int table_size; }; +/** + * This structure defines the drm_mm memory object, which will be used by the + * DRM for its buffer objects. + */ +struct drm_gem_object { + /** Reference count of this object */ + struct kref refcount; + + /** Handle count of this object. 
Each handle also holds a reference */ + struct kref handlecount; + + /** Related drm device */ + struct drm_device *dev; + + /** File representing the shmem storage */ + struct file *filp; + + /** + * Size of the object, in bytes. Immutable over the object's + * lifetime. + */ + size_t size; + + /** + * Global name for this object, starts at 1. 0 means unnamed. + * Access is covered by the object_name_lock in the related drm_device + */ + int name; + + /** + * Memory domains. These monitor which caches contain read/write data + * related to the object. When transitioning from one set of domains + * to another, the driver is called to ensure that caches are suitably + * flushed and invalidated + */ + uint32_t read_domains; + uint32_t write_domain; + + /** + * While validating an exec operation, the + * new read/write domain values are computed here. + * They will be transferred to the above values + * at the point that any cache flushing occurs + */ + uint32_t pending_read_domains; + uint32_t pending_write_domain; + + void *driver_private; +}; + #include "drm_objects.h" #include "drm_crtc.h" @@ -746,6 +812,18 @@ struct drm_driver { int (*master_create)(struct drm_device *dev, struct drm_master *master); void (*master_destroy)(struct drm_device *dev, struct drm_master *master); + int (*proc_init)(struct drm_minor *minor); + void (*proc_cleanup)(struct drm_minor *minor); + + /** + * Driver-specific constructor for drm_gem_objects, to set up + * obj->driver_private. + * + * Returns 0 on success. + */ + int (*gem_init_object) (struct drm_gem_object *obj); + void (*gem_free_object) (struct drm_gem_object *obj); + struct drm_fence_driver *fence_driver; struct drm_bo_driver *bo_driver; @@ -827,7 +905,7 @@ struct drm_device { struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ struct drm_open_hash map_hash; /**< User token hash table for maps */ - struct drm_mm offset_manager; /**< User token manager */ + struct drm_memrange offset_manager; /**< User token manager */ struct drm_open_hash object_hash; /**< User token hash table for objects */ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ struct page *ttm_dummy_page; @@ -943,6 +1021,21 @@ struct drm_device { /* DRM mode setting */ struct drm_mode_config mode_config; + + /** \name GEM information */ + /*@{ */ + spinlock_t object_name_lock; + struct idr object_name_idr; + atomic_t object_count; + atomic_t object_memory; + atomic_t pin_count; + atomic_t pin_memory; + atomic_t gtt_count; + atomic_t gtt_memory; + uint32_t gtt_total; + uint32_t invalidate_domains; /* domains pending invalidation */ + uint32_t flush_domains; /* domains pending flush */ + /*@} */ }; #if __OS_HAS_AGP @@ -1069,6 +1162,10 @@ extern void drm_free_pages(unsigned long address, int order, int area); extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); +extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset); extern int drm_unbind_agp(DRM_AGP_MEM * handle); extern void drm_free_memctl(size_t size); @@ -1273,7 +1370,7 @@ extern void drm_put_master(struct drm_master *master); extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); extern int drm_put_dev(struct drm_device *dev); -extern int drm_put_minor(struct drm_minor **minor); 
+extern int drm_put_minor(struct drm_device *dev, struct drm_minor **p); extern unsigned int drm_debug; /* 1 to enable debug output */ extern struct class *drm_class; @@ -1317,27 +1414,94 @@ extern int drm_sysfs_connector_add(struct drm_connector *connector); extern void drm_sysfs_connector_remove(struct drm_connector *connector); /* - * Basic memory manager support (drm_mm.c) + * Basic memory manager support (drm_memrange.c) */ -extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, - unsigned alignment); -extern void drm_mm_put_block(struct drm_mm_node *cur); -extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, - unsigned alignment, int best_match); -extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); -extern void drm_mm_takedown(struct drm_mm *mm); -extern int drm_mm_clean(struct drm_mm *mm); -extern unsigned long drm_mm_tail_space(struct drm_mm *mm); -extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); -extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); -extern void drm_mm_print(struct drm_mm *mm, const char *name); - -static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) +extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent, + unsigned long size, + unsigned alignment); +extern void drm_memrange_put_block(struct drm_memrange_node *cur); +extern struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange *mm, + unsigned long size, + unsigned alignment, int best_match); +extern int drm_memrange_init(struct drm_memrange *mm, + unsigned long start, unsigned long size); +extern void drm_memrange_takedown(struct drm_memrange *mm); +extern int drm_memrange_clean(struct drm_memrange *mm); +extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm); +extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, + unsigned long size); +extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm, + unsigned long size); +static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block) { return block->mm; } +/* Graphics Execution Manager library functions (drm_gem.c) */ +int +drm_gem_init (struct drm_device *dev); + +void +drm_gem_object_free (struct kref *kref); + +struct drm_gem_object * +drm_gem_object_alloc(struct drm_device *dev, size_t size); + +void +drm_gem_object_handle_free (struct kref *kref); + +static inline void drm_gem_object_reference(struct drm_gem_object *obj) +{ + kref_get(&obj->refcount); +} + +static inline void drm_gem_object_unreference(struct drm_gem_object *obj) +{ + if (obj == NULL) + return; + + kref_put (&obj->refcount, drm_gem_object_free); +} + +int +drm_gem_handle_create(struct drm_file *file_priv, + struct drm_gem_object *obj, + int *handlep); + +static inline void drm_gem_object_handle_reference (struct drm_gem_object *obj) +{ + drm_gem_object_reference (obj); + kref_get(&obj->handlecount); +} + +static inline void drm_gem_object_handle_unreference (struct drm_gem_object *obj) +{ + if (obj == NULL) + return; + + /* + * Must bump handle count first as this may be the last + * ref, in which case the object would disappear before we + * checked for a name + */ + kref_put (&obj->handlecount, drm_gem_object_handle_free); + drm_gem_object_unreference (obj); +} + +struct drm_gem_object * +drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, + int handle); +int drm_gem_close_ioctl(struct 
drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_gem_flink_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_gem_open_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); +void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); + extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 0aa94a75..3cc94ff7 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -484,7 +484,50 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle) return agp_unbind_memory(handle); } +/** + * Binds a collection of pages into AGP memory at the given offset, returning + * the AGP memory structure containing them. + * + * No reference is held on the pages during this time -- it is up to the + * caller to handle that. + */ +DRM_AGP_MEM * +drm_agp_bind_pages(struct drm_device *dev, + struct page **pages, + unsigned long num_pages, + uint32_t gtt_offset) +{ + DRM_AGP_MEM *mem; + int ret, i; + DRM_DEBUG("drm_agp_populate_ttm\n"); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) + mem = drm_agp_allocate_memory(num_pages, AGP_USER_MEMORY); +#else + mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages, + AGP_USER_MEMORY); +#endif + if (mem == NULL) { + DRM_ERROR("Failed to allocate memory for %ld pages\n", + num_pages); + return NULL; + } + + for (i = 0; i < num_pages; i++) + mem->memory[i] = phys_to_gart(page_to_phys(pages[i])); + mem->page_count = num_pages; + + mem->is_flushed = true; + ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE); + if (ret != 0) { + DRM_ERROR("Failed to bind AGP memory: %d\n", ret); + agp_free_memory(mem); + return NULL; + } + + return mem; +} +EXPORT_SYMBOL(drm_agp_bind_pages); /* * AGP ttm backend interface. 
@@ -554,7 +597,7 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED); DRM_DEBUG("drm_agp_bind_ttm\n"); - mem->is_flushed = TRUE; + mem->is_flushed = true; mem->type = AGP_USER_MEMORY; /* CACHED MAPPED implies not snooped memory */ if (snooped) @@ -653,7 +696,7 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) agp_be->mem = NULL; agp_be->bridge = dev->agp->bridge; - agp_be->populated = FALSE; + agp_be->populated = false; agp_be->backend.func = &agp_ttm_backend; agp_be->backend.dev = dev; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index f2d3cebf..2e0d1243 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -418,14 +418,14 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all) if (!bo->fence) { list_del_init(&bo->lru); if (bo->mem.mm_node) { - drm_mm_put_block(bo->mem.mm_node); + drm_memrange_put_block(bo->mem.mm_node); if (bo->pinned_node == bo->mem.mm_node) bo->pinned_node = NULL; bo->mem.mm_node = NULL; } list_del_init(&bo->pinned_lru); if (bo->pinned_node) { - drm_mm_put_block(bo->pinned_node); + drm_memrange_put_block(bo->pinned_node); bo->pinned_node = NULL; } list_del_init(&bo->ddestroy); @@ -790,7 +790,7 @@ out: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { if (evict_mem.mm_node != bo->pinned_node) - drm_mm_put_block(evict_mem.mm_node); + drm_memrange_put_block(evict_mem.mm_node); evict_mem.mm_node = NULL; } drm_bo_add_to_lru(bo); @@ -809,7 +809,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev, struct drm_bo_mem_reg *mem, uint32_t mem_type, int no_wait) { - struct drm_mm_node *node; + struct drm_memrange_node *node; struct drm_buffer_manager *bm = &dev->bm; struct drm_buffer_object *entry; struct drm_mem_type_manager *man = &bm->man[mem_type]; @@ -819,7 +819,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev, mutex_lock(&dev->struct_mutex); do { - node = drm_mm_search_free(&man->manager, num_pages, + node = drm_memrange_search_free(&man->manager, num_pages, mem->page_alignment, 1); if (node) break; @@ -845,7 +845,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev, return -ENOMEM; } - node = drm_mm_get_block(node, num_pages, mem->page_alignment); + node = drm_memrange_get_block(node, num_pages, mem->page_alignment); if (unlikely(!node)) { mutex_unlock(&dev->struct_mutex); return -ENOMEM; @@ -923,7 +923,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo, int type_found = 0; int type_ok = 0; int has_eagain = 0; - struct drm_mm_node *node = NULL; + struct drm_memrange_node *node = NULL; int ret; mem->mm_node = NULL; @@ -951,10 +951,10 @@ int drm_bo_mem_space(struct drm_buffer_object *bo, mutex_lock(&dev->struct_mutex); if (man->has_type && man->use_type) { type_found = 1; - node = drm_mm_search_free(&man->manager, mem->num_pages, + node = drm_memrange_search_free(&man->manager, mem->num_pages, mem->page_alignment, 1); if (node) - node = drm_mm_get_block(node, mem->num_pages, + node = drm_memrange_get_block(node, mem->num_pages, mem->page_alignment); } mutex_unlock(&dev->struct_mutex); @@ -1339,7 +1339,7 @@ out_unlock: if (ret || !move_unfenced) { if (mem.mm_node) { if (mem.mm_node != bo->pinned_node) - drm_mm_put_block(mem.mm_node); + drm_memrange_put_block(mem.mm_node); mem.mm_node = NULL; } drm_bo_add_to_lru(bo); @@ -1431,7 +1431,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo, if (bo->pinned_node != bo->mem.mm_node) { if (bo->pinned_node 
!= NULL) - drm_mm_put_block(bo->pinned_node); + drm_memrange_put_block(bo->pinned_node); bo->pinned_node = bo->mem.mm_node; } @@ -1442,7 +1442,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo, mutex_lock(&dev->struct_mutex); if (bo->pinned_node != bo->mem.mm_node) - drm_mm_put_block(bo->pinned_node); + drm_memrange_put_block(bo->pinned_node); list_del_init(&bo->pinned_lru); bo->pinned_node = NULL; @@ -2081,7 +2081,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo, if (bo->pinned_node == bo->mem.mm_node) bo->pinned_node = NULL; if (bo->pinned_node != NULL) { - drm_mm_put_block(bo->pinned_node); + drm_memrange_put_block(bo->pinned_node); bo->pinned_node = NULL; } mutex_unlock(&dev->struct_mutex); @@ -2222,8 +2222,8 @@ int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean) drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); - if (drm_mm_clean(&man->manager)) { - drm_mm_takedown(&man->manager); + if (drm_memrange_clean(&man->manager)) { + drm_memrange_takedown(&man->manager); } else { ret = -EBUSY; } @@ -2294,7 +2294,7 @@ int drm_bo_init_mm(struct drm_device *dev, unsigned type, DRM_ERROR("Zero size memory manager type %d\n", type); return ret; } - ret = drm_mm_init(&man->manager, p_offset, p_size); + ret = drm_memrange_init(&man->manager, p_offset, p_size); if (ret) return ret; } @@ -2721,7 +2721,7 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo) list->user_token = 0; } if (list->file_offset_node) { - drm_mm_put_block(list->file_offset_node); + drm_memrange_put_block(list->file_offset_node); list->file_offset_node = NULL; } @@ -2764,7 +2764,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo) atomic_inc(&bo->usage); map->handle = (void *)bo; - list->file_offset_node = drm_mm_search_free(&dev->offset_manager, + list->file_offset_node = drm_memrange_search_free(&dev->offset_manager, bo->mem.num_pages, 0, 0); if (unlikely(!list->file_offset_node)) { @@ -2772,7 +2772,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo) return -ENOMEM; } - list->file_offset_node = drm_mm_get_block(list->file_offset_node, + list->file_offset_node = drm_memrange_get_block(list->file_offset_node, bo->mem.num_pages, 0); if (unlikely(!list->file_offset_node)) { diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 5c290af2..9147a475 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -41,7 +41,7 @@ static void drm_bo_free_old_node(struct drm_buffer_object *bo) if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { mutex_lock(&bo->dev->struct_mutex); - drm_mm_put_block(old_mem->mm_node); + drm_memrange_put_block(old_mem->mm_node); mutex_unlock(&bo->dev->struct_mutex); } old_mem->mm_node = NULL; diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index f35821bd..cfa4fc6d 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -353,7 +353,7 @@ static inline int kobject_uevent_env(struct kobject *kobj, #endif -#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM)) +#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM)) #define DRM_KMAP_ATOMIC_PROT_PFN extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t protection); diff --git a/linux-core/drm_crtc_helper.c b/linux-core/drm_crtc_helper.c index ec76aa93..6f16dad6 100644 --- a/linux-core/drm_crtc_helper.c +++ b/linux-core/drm_crtc_helper.c @@ -95,7 
+95,7 @@ void drm_helper_probe_single_connector_modes(struct drm_connector *connector, ui } - drm_mode_prune_invalid(dev, &connector->modes, TRUE); + drm_mode_prune_invalid(dev, &connector->modes, true); if (list_empty(&connector->modes)) { struct drm_display_mode *stdmode; diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 4e7c531a..9113fa54 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -175,6 +175,10 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_MM_INFO, drm_mm_info_ioctl, 0), + + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) @@ -424,12 +428,13 @@ static void drm_cleanup(struct drm_device * dev) drm_ctxbitmap_cleanup(dev); drm_ht_remove(&dev->map_hash); - drm_mm_takedown(&dev->offset_manager); + drm_memrange_takedown(&dev->offset_manager); drm_ht_remove(&dev->object_hash); - drm_put_minor(&dev->primary); + drm_put_minor(dev, &dev->primary); if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_put_minor(&dev->control); + drm_put_minor(dev, &dev->control); + if (drm_put_dev(dev)) DRM_ERROR("Cannot unload module\n"); } diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 03881ee6..3b3a0a3c 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -262,6 +262,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp, goto out_free; } + if (dev->driver->driver_features & DRIVER_GEM) + drm_gem_open(dev, priv); + if (dev->driver->open) { ret = dev->driver->open(dev, priv); if (ret < 0) @@ -462,6 +465,9 @@ int drm_release(struct inode *inode, struct file *filp) } } + if (dev->driver->driver_features & DRIVER_GEM) + drm_gem_release(dev, file_priv); + drm_fasync(-1, filp, 0); mutex_lock(&dev->ctxlist_mutex); diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c new file mode 100644 index 00000000..434155b3 --- /dev/null +++ b/linux-core/drm_gem.c @@ -0,0 +1,420 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt <eric@anholt.net> + * + */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/module.h> +#include <linux/mman.h> +#include <linux/pagemap.h> +#include "drmP.h" + +/** @file drm_gem.c + * + * This file provides some of the base ioctls and library routines for + * the graphics memory manager implemented by each device driver. + * + * Because various devices have different requirements in terms of + * synchronization and migration strategies, implementing that is left up to + * the driver, and all that the general API provides should be generic -- + * allocating objects, reading/writing data with the cpu, freeing objects. + * Even there, platform-dependent optimizations for reading/writing data with + * the CPU mean we'll likely hook those out to driver-specific calls. However, + * the DRI2 implementation wants to have at least allocate/mmap be generic. + * + * The goal was to have swap-backed object allocation managed through + * struct file. However, file descriptors as handles to a struct file have + * two major failings: + * - Process limits prevent more than 1024 or so being used at a time by + * default. + * - Inability to allocate high fds will aggravate the X Server's select() + * handling, and likely that of many GL client applications as well. + * + * This led to a plan of using our own integer IDs (called handles, following + * DRM terminology) to mimic fds, and implement the fd syscalls we need as + * ioctls. The objects themselves will still include the struct file so + * that we can transition to fds if the required kernel infrastructure shows + * up at a later date, and as our interface with shmfs for memory allocation. + */ + +/** + * Initialize the GEM device fields + */ + +int +drm_gem_init(struct drm_device *dev) +{ + spin_lock_init(&dev->object_name_lock); + idr_init(&dev->object_name_idr); + atomic_set(&dev->object_count, 0); + atomic_set(&dev->object_memory, 0); + atomic_set(&dev->pin_count, 0); + atomic_set(&dev->pin_memory, 0); + atomic_set(&dev->gtt_count, 0); + atomic_set(&dev->gtt_memory, 0); + return 0; +} + +/** + * Allocate a GEM object of the specified size with shmfs backing store + */ +struct drm_gem_object * +drm_gem_object_alloc(struct drm_device *dev, size_t size) +{ + struct drm_gem_object *obj; + + BUG_ON((size & (PAGE_SIZE - 1)) != 0); + + obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); + + obj->dev = dev; + obj->filp = shmem_file_setup("drm mm object", size, 0); + if (IS_ERR(obj->filp)) { + kfree(obj); + return NULL; + } + + kref_init(&obj->refcount); + kref_init(&obj->handlecount); + obj->size = size; + if (dev->driver->gem_init_object != NULL && + dev->driver->gem_init_object(obj) != 0) { + fput(obj->filp); + kfree(obj); + return NULL; + } + atomic_inc(&dev->object_count); + atomic_add(obj->size, &dev->object_memory); + return obj; +} +EXPORT_SYMBOL(drm_gem_object_alloc); + +/** + * Removes the mapping from handle to filp for this object. + */ +static int +drm_gem_handle_delete(struct drm_file *filp, int handle) +{ + struct drm_device *dev; + struct drm_gem_object *obj; + + /* This is gross. The idr system doesn't let us try a delete and + * return an error code. It just spews if you fail at deleting. 
+ * So, we have to grab a lock around finding the object and then + * doing the delete on it and dropping the refcount, or the user + * could race us to double-decrement the refcount and cause a + * use-after-free later. Given the frequency of our handle lookups, + * we may want to use ida for number allocation and a hash table + * for the pointers, anyway. + */ + spin_lock(&filp->table_lock); + + /* Check if we currently have a reference on the object */ + obj = idr_find(&filp->object_idr, handle); + if (obj == NULL) { + spin_unlock(&filp->table_lock); + return -EINVAL; + } + dev = obj->dev; + + /* Release reference and decrement refcount. */ + idr_remove(&filp->object_idr, handle); + spin_unlock(&filp->table_lock); + + mutex_lock(&dev->struct_mutex); + drm_gem_object_handle_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +/** + * Create a handle for this object. This adds a handle reference + * to the object, which includes a regular reference count. Callers + * will likely want to dereference the object afterwards. + */ +int +drm_gem_handle_create(struct drm_file *file_priv, + struct drm_gem_object *obj, + int *handlep) +{ + int ret; + + /* + * Get the user-visible handle using idr. + */ +again: + /* ensure there is space available to allocate a handle */ + if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) + return -ENOMEM; + + /* do the allocation under our spinlock */ + spin_lock(&file_priv->table_lock); + ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep); + spin_unlock(&file_priv->table_lock); + if (ret == -EAGAIN) + goto again; + + if (ret != 0) + return ret; + + drm_gem_object_handle_reference(obj); + return 0; +} +EXPORT_SYMBOL(drm_gem_handle_create); + +/** Returns a reference to the object named by the handle. */ +struct drm_gem_object * +drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, + int handle) +{ + struct drm_gem_object *obj; + + spin_lock(&filp->table_lock); + + /* Check if we currently have a reference on the object */ + obj = idr_find(&filp->object_idr, handle); + if (obj == NULL) { + spin_unlock(&filp->table_lock); + return NULL; + } + + drm_gem_object_reference(obj); + + spin_unlock(&filp->table_lock); + + return obj; +} +EXPORT_SYMBOL(drm_gem_object_lookup); + +/** + * Releases the handle to an mm object. + */ +int +drm_gem_close_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_gem_close *args = data; + int ret; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + ret = drm_gem_handle_delete(file_priv, args->handle); + + return ret; +} + +/** + * Create a global name for an object, returning the name. + * + * Note that the name does not hold a reference; when the object + * is freed, the name goes away. 
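As an illustrative sketch (not part of the patch itself), the flink, open and close ioctls implemented below could be driven from an authorized user-space client roughly as follows. The DRM_IOCTL_GEM_* request codes and the drm_gem_flink/drm_gem_open layouts are assumed to match the definitions in drm.h, which lives outside the linux-core portion of this diff:

#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Exporting client: publish a global name for a local handle. */
static int gem_export(int drm_fd, uint32_t handle, uint32_t *name_out)
{
	struct drm_gem_flink flink = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_GEM_FLINK, &flink) != 0)
		return -1;
	*name_out = flink.name;		/* valid only while the object lives */
	return 0;
}

/* Importing client: turn the global name into a handle of its own. */
static int gem_import(int drm_fd, uint32_t name, uint32_t *handle_out)
{
	struct drm_gem_open op = { .name = name };

	if (ioctl(drm_fd, DRM_IOCTL_GEM_OPEN, &op) != 0)
		return -1;
	printf("imported %llu-byte object\n", (unsigned long long) op.size);
	*handle_out = op.handle;
	/* drop the handle later with DRM_IOCTL_GEM_CLOSE */
	return 0;
}

Because the name does not hold a reference, the importing side must be prepared for -ENOENT if the exporting client has already freed the object.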
+ */ +int +drm_gem_flink_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_gem_flink *args = data; + struct drm_gem_object *obj; + int ret; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EINVAL; + +again: + if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) + return -ENOMEM; + + spin_lock(&dev->object_name_lock); + if (obj->name) { + spin_unlock(&dev->object_name_lock); + return -EEXIST; + } + ret = idr_get_new_above(&dev->object_name_idr, obj, 1, + &obj->name); + spin_unlock(&dev->object_name_lock); + if (ret == -EAGAIN) + goto again; + + if (ret != 0) { + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + + /* + * Leave the reference from the lookup around as the + * name table now holds one + */ + args->name = (uint64_t) obj->name; + + return 0; +} + +/** + * Open an object using the global name, returning a handle and the size. + * + * This handle (of course) holds a reference to the object, so the object + * will not go away until the handle is deleted. + */ +int +drm_gem_open_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_gem_open *args = data; + struct drm_gem_object *obj; + int ret; + int handle; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + spin_lock(&dev->object_name_lock); + obj = idr_find(&dev->object_name_idr, (int) args->name); + if (obj) + drm_gem_object_reference(obj); + spin_unlock(&dev->object_name_lock); + if (!obj) + return -ENOENT; + + ret = drm_gem_handle_create(file_priv, obj, &handle); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + if (ret) + return ret; + + args->handle = handle; + args->size = obj->size; + + return 0; +} + +/** + * Called at device open time, sets up the structure for handling refcounting + * of mm objects. + */ +void +drm_gem_open(struct drm_device *dev, struct drm_file *file_private) +{ + idr_init(&file_private->object_idr); + spin_lock_init(&file_private->table_lock); +} + +/** + * Called at device close to release the file's + * handle references on objects. + */ +static int +drm_gem_object_release_handle(int id, void *ptr, void *data) +{ + struct drm_gem_object *obj = ptr; + + drm_gem_object_handle_unreference(obj); + + return 0; +} + +/** + * Called at close time when the filp is going away. + * + * Releases any remaining references on objects by this filp. + */ +void +drm_gem_release(struct drm_device *dev, struct drm_file *file_private) +{ + mutex_lock(&dev->struct_mutex); + idr_for_each(&file_private->object_idr, + &drm_gem_object_release_handle, NULL); + + idr_destroy(&file_private->object_idr); + mutex_unlock(&dev->struct_mutex); +} + +/** + * Called after the last reference to the object has been lost. + * + * Frees the object + */ +void +drm_gem_object_free(struct kref *kref) +{ + struct drm_gem_object *obj = (struct drm_gem_object *) kref; + struct drm_device *dev = obj->dev; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + + if (dev->driver->gem_free_object != NULL) + dev->driver->gem_free_object(obj); + + fput(obj->filp); + atomic_dec(&dev->object_count); + atomic_sub(obj->size, &dev->object_memory); + kfree(obj); +} +EXPORT_SYMBOL(drm_gem_object_free); + +/** + * Called after the last handle to the object has been closed + * + * Removes any name for the object. 
Note that this must be + * called before drm_gem_object_free or we'll be touching + * freed memory + */ +void +drm_gem_object_handle_free(struct kref *kref) +{ + struct drm_gem_object *obj = container_of(kref, + struct drm_gem_object, + handlecount); + struct drm_device *dev = obj->dev; + + /* Remove any name for this object */ + spin_lock(&dev->object_name_lock); + if (obj->name) { + idr_remove(&dev->object_name_idr, obj->name); + spin_unlock(&dev->object_name_lock); + /* + * The object name held a reference to this object, drop + * that now. + */ + drm_gem_object_unreference(obj); + } else + spin_unlock(&dev->object_name_lock); + +} +EXPORT_SYMBOL(drm_gem_object_handle_free); + diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index e1c93054..0dfbe57a 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) return -EINVAL; - p->irq = dev->irq; + p->irq = dev->pdev->irq; DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, p->irq); @@ -133,6 +133,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, (unsigned long)dev); + init_timer_deferrable(&dev->vblank_disable_timer); spin_lock_init(&dev->vbl_lock); atomic_set(&dev->vbl_signal_pending, 0); dev->num_crtcs = num_crtcs; @@ -284,7 +285,7 @@ int drm_irq_install(struct drm_device * dev) if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; - if (dev->irq == 0) + if (dev->pdev->irq == 0) return -EINVAL; mutex_lock(&dev->struct_mutex); @@ -302,7 +303,7 @@ int drm_irq_install(struct drm_device * dev) dev->irq_enabled = 1; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("irq=%d\n", dev->irq); + DRM_DEBUG("irq=%d\n", dev->pdev->irq); /* Before installing handler */ dev->driver->irq_preinstall(dev); @@ -311,7 +312,7 @@ int drm_irq_install(struct drm_device * dev) if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) sh_flags = IRQF_SHARED; - ret = request_irq(dev->irq, dev->driver->irq_handler, + ret = request_irq(dev->pdev->irq, dev->driver->irq_handler, sh_flags, dev->devname, dev); if (ret < 0) { mutex_lock(&dev->struct_mutex); @@ -319,6 +320,10 @@ int drm_irq_install(struct drm_device * dev) mutex_unlock(&dev->struct_mutex); return ret; } + /* Expose the device irq to device drivers that want to export it for + * whatever reason. 
+ */ + dev->irq = dev->pdev->irq; /* After installing handler */ ret = dev->driver->irq_postinstall(dev); @@ -354,11 +359,11 @@ int drm_irq_uninstall(struct drm_device * dev) if (!irq_enabled) return -EINVAL; - DRM_DEBUG("irq=%d\n", dev->irq); + DRM_DEBUG("irq=%d\n", dev->pdev->irq); dev->driver->irq_uninstall(dev); - free_irq(dev->irq, dev); + free_irq(dev->pdev->irq, dev); drm_vblank_cleanup(dev); @@ -396,7 +401,7 @@ int drm_control(struct drm_device *dev, void *data, if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl->irq != dev->irq) + ctl->irq != dev->pdev->irq) return -EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: @@ -579,7 +584,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, int ret = 0; unsigned int flags, seq, crtc; - if ((!dev->irq) || (!dev->irq_enabled)) + if ((!dev->pdev->irq) || (!dev->irq_enabled)) return -EINVAL; if (vblwait->request.type & diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index 6bbf1444..e8bdad92 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -384,7 +384,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data) } EXPORT_SYMBOL(drm_idlelock_release); - int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) { struct drm_master *master = file_priv->master; diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index 75f5b521..4b494f9c 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -310,6 +310,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages) { return drm_agp_free_memory(handle) ? 0 : -EINVAL; } +EXPORT_SYMBOL(drm_free_agp); /** Wrapper around agp_bind_memory() */ int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) @@ -322,6 +323,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle) { return drm_agp_unbind_memory(handle); } +EXPORT_SYMBOL(drm_unbind_agp); #else /* __OS_HAS_AGP*/ static void *agp_remap(unsigned long offset, unsigned long size, diff --git a/linux-core/drm_mm.c b/linux-core/drm_memrange.c index 28726a65..5921eff8 100644 --- a/linux-core/drm_mm.c +++ b/linux-core/drm_memrange.c @@ -44,26 +44,26 @@ #include "drmP.h" #include <linux/slab.h> -unsigned long drm_mm_tail_space(struct drm_mm *mm) +unsigned long drm_memrange_tail_space(struct drm_memrange *mm) { struct list_head *tail_node; - struct drm_mm_node *entry; + struct drm_memrange_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_mm_node, ml_entry); + entry = list_entry(tail_node, struct drm_memrange_node, ml_entry); if (!entry->free) return 0; return entry->size; } -int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) +int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long size) { struct list_head *tail_node; - struct drm_mm_node *entry; + struct drm_memrange_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_mm_node, ml_entry); + entry = list_entry(tail_node, struct drm_memrange_node, ml_entry); if (!entry->free) return -ENOMEM; @@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) } -static int drm_mm_create_tail_node(struct drm_mm *mm, +static int drm_memrange_create_tail_node(struct drm_memrange *mm, unsigned long start, unsigned long size) { - struct drm_mm_node *child; + struct drm_memrange_node *child; - child = (struct drm_mm_node *) + child = (struct drm_memrange_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return -ENOMEM; @@ -98,26 +98,26 
@@ static int drm_mm_create_tail_node(struct drm_mm *mm, } -int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size) +int drm_memrange_add_space_to_tail(struct drm_memrange *mm, unsigned long size) { struct list_head *tail_node; - struct drm_mm_node *entry; + struct drm_memrange_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_mm_node, ml_entry); + entry = list_entry(tail_node, struct drm_memrange_node, ml_entry); if (!entry->free) { - return drm_mm_create_tail_node(mm, entry->start + entry->size, size); + return drm_memrange_create_tail_node(mm, entry->start + entry->size, size); } entry->size += size; return 0; } -static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, +static struct drm_memrange_node *drm_memrange_split_at_start(struct drm_memrange_node *parent, unsigned long size) { - struct drm_mm_node *child; + struct drm_memrange_node *child; - child = (struct drm_mm_node *) + child = (struct drm_memrange_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return NULL; @@ -137,19 +137,19 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, return child; } -struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, +struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent, unsigned long size, unsigned alignment) { - struct drm_mm_node *align_splitoff = NULL; - struct drm_mm_node *child; + struct drm_memrange_node *align_splitoff = NULL; + struct drm_memrange_node *child; unsigned tmp = 0; if (alignment) tmp = parent->start % alignment; if (tmp) { - align_splitoff = drm_mm_split_at_start(parent, alignment - tmp); + align_splitoff = drm_memrange_split_at_start(parent, alignment - tmp); if (!align_splitoff) return NULL; } @@ -159,40 +159,41 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, parent->free = 0; return parent; } else { - child = drm_mm_split_at_start(parent, size); + child = drm_memrange_split_at_start(parent, size); } if (align_splitoff) - drm_mm_put_block(align_splitoff); + drm_memrange_put_block(align_splitoff); return child; } +EXPORT_SYMBOL(drm_memrange_get_block); /* * Put a block. Merge with the previous and / or next block if they are free. * Otherwise add to the free stack. 
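To make the renamed interface concrete, a minimal caller of the drm_memrange allocator could look like the sketch below (illustrative only; it mirrors the pattern used by drm_sman_mm_allocate() and i915_gem_object_bind_to_gtt() elsewhere in this diff): find a hole that is large enough, then carve an exactly-sized block out of it.

static struct drm_memrange_node *
example_alloc_block(struct drm_memrange *mm, unsigned long size,
		    unsigned alignment)
{
	struct drm_memrange_node *hole, *block;

	/* Find a free hole that can take size bytes at this alignment. */
	hole = drm_memrange_search_free(mm, size, alignment, 0);
	if (hole == NULL)
		return NULL;	/* caller must evict something and retry */

	/* Split an exactly-sized block off the hole. */
	block = drm_memrange_get_block(hole, size, alignment);
	return block;		/* release again with drm_memrange_put_block() */
}

Passing 1 instead of 0 as the last argument to drm_memrange_search_free() asks for best-fit (the smallest hole that is still big enough) rather than the first hole found.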
*/ -void drm_mm_put_block(struct drm_mm_node * cur) +void drm_memrange_put_block(struct drm_memrange_node * cur) { - struct drm_mm *mm = cur->mm; + struct drm_memrange *mm = cur->mm; struct list_head *cur_head = &cur->ml_entry; struct list_head *root_head = &mm->ml_entry; - struct drm_mm_node *prev_node = NULL; - struct drm_mm_node *next_node; + struct drm_memrange_node *prev_node = NULL; + struct drm_memrange_node *next_node; int merged = 0; if (cur_head->prev != root_head) { - prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); + prev_node = list_entry(cur_head->prev, struct drm_memrange_node, ml_entry); if (prev_node->free) { prev_node->size += cur->size; merged = 1; } } if (cur_head->next != root_head) { - next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); + next_node = list_entry(cur_head->next, struct drm_memrange_node, ml_entry); if (next_node->free) { if (merged) { prev_node->size += next_node->size; @@ -215,16 +216,16 @@ void drm_mm_put_block(struct drm_mm_node * cur) drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM); } } -EXPORT_SYMBOL(drm_mm_put_block); +EXPORT_SYMBOL(drm_memrange_put_block); -struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, +struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * mm, unsigned long size, unsigned alignment, int best_match) { struct list_head *list; const struct list_head *free_stack = &mm->fl_entry; - struct drm_mm_node *entry; - struct drm_mm_node *best; + struct drm_memrange_node *entry; + struct drm_memrange_node *best; unsigned long best_size; unsigned wasted; @@ -232,7 +233,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, best_size = ~0UL; list_for_each(list, free_stack) { - entry = list_entry(list, struct drm_mm_node, fl_entry); + entry = list_entry(list, struct drm_memrange_node, fl_entry); wasted = 0; if (entry->size < size) @@ -257,30 +258,31 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, return best; } +EXPORT_SYMBOL(drm_memrange_search_free); -int drm_mm_clean(struct drm_mm * mm) +int drm_memrange_clean(struct drm_memrange * mm) { struct list_head *head = &mm->ml_entry; return (head->next->next == head); } -int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) +int drm_memrange_init(struct drm_memrange * mm, unsigned long start, unsigned long size) { INIT_LIST_HEAD(&mm->ml_entry); INIT_LIST_HEAD(&mm->fl_entry); - return drm_mm_create_tail_node(mm, start, size); + return drm_memrange_create_tail_node(mm, start, size); } -EXPORT_SYMBOL(drm_mm_init); +EXPORT_SYMBOL(drm_memrange_init); -void drm_mm_takedown(struct drm_mm * mm) +void drm_memrange_takedown(struct drm_memrange * mm) { struct list_head *bnode = mm->fl_entry.next; - struct drm_mm_node *entry; + struct drm_memrange_node *entry; - entry = list_entry(bnode, struct drm_mm_node, fl_entry); + entry = list_entry(bnode, struct drm_memrange_node, fl_entry); if (entry->ml_entry.next != &mm->ml_entry || entry->fl_entry.next != &mm->fl_entry) { @@ -292,20 +294,4 @@ void drm_mm_takedown(struct drm_mm * mm) list_del(&entry->ml_entry); drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM); } - -EXPORT_SYMBOL(drm_mm_takedown); - -void drm_mm_print(struct drm_mm *mm, const char *name) -{ - struct list_head *list; - const struct list_head *mm_stack = &mm->ml_entry; - struct drm_mm_node *entry; - - DRM_DEBUG("Memory usage for '%s'\n", name ? 
name : "unknown"); - list_for_each(list, mm_stack) { - entry = list_entry(list, struct drm_mm_node, ml_entry); - DRM_DEBUG("\t0x%08lx %li %s pages\n", entry->start, entry->size, - entry->free ? "free" : "used"); - } -} -EXPORT_SYMBOL(drm_mm_print); +EXPORT_SYMBOL(drm_memrange_takedown); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 7feacd33..d0c34ca3 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -300,7 +300,12 @@ struct drm_ttm_backend_func { void (*destroy) (struct drm_ttm_backend *backend); }; - +/** + * This structure associates a set of flags and methods with a drm_ttm + * object, and will also be subclassed by the particular backend. + * + * \sa #drm_agp_ttm_backend + */ struct drm_ttm_backend { struct drm_device *dev; uint32_t flags; @@ -412,7 +417,7 @@ extern int drm_ttm_destroy(struct drm_ttm *ttm); */ struct drm_bo_mem_reg { - struct drm_mm_node *mm_node; + struct drm_memrange_node *mm_node; unsigned long size; unsigned long num_pages; uint32_t page_alignment; @@ -493,7 +498,7 @@ struct drm_buffer_object { unsigned long num_pages; /* For pinned buffers */ - struct drm_mm_node *pinned_node; + struct drm_memrange_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; @@ -528,7 +533,7 @@ struct drm_mem_type_manager { int has_type; int use_type; int kern_init_type; - struct drm_mm manager; + struct drm_memrange manager; struct list_head lru; struct list_head pinned; uint32_t flags; diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index 7f185209..127a7987 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -51,6 +51,10 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); static int drm_objects_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); +static int drm_gem_name_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data); +static int drm_gem_object_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data); #if DRM_DEBUG_CODE static int drm_vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data); @@ -70,6 +74,8 @@ static struct drm_proc_list { {"queues", drm_queues_info}, {"bufs", drm_bufs_info}, {"objects", drm_objects_info}, + {"gem_names", drm_gem_name_info}, + {"gem_objects", drm_gem_object_info}, #if DRM_DEBUG_CODE {"vma", drm_vma_info}, #endif @@ -586,6 +592,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset, return ret; } +struct drm_gem_name_info_data { + int len; + char *buf; + int eof; +}; + +static int drm_gem_one_name_info (int id, void *ptr, void *data) +{ + struct drm_gem_object *obj = ptr; + struct drm_gem_name_info_data *nid = data; + + DRM_INFO ("name %d size %d\n", obj->name, obj->size); + if (nid->eof) + return 0; + + nid->len += sprintf (&nid->buf[nid->len], + "%6d%9d%8d%9d\n", + obj->name, obj->size, + atomic_read(&obj->handlecount.refcount), + atomic_read(&obj->refcount.refcount)); + if (nid->len > DRM_PROC_LIMIT) { + nid->eof = 1; + return 0; + } + return 0; +} + +static int drm_gem_name_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_gem_name_info_data nid; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + nid.len = sprintf (buf, " name size handles refcount\n"); + nid.buf = buf; + nid.eof = 0; + idr_for_each (&dev->object_name_idr, 
drm_gem_one_name_info, &nid); + + *start = &buf[offset]; + *eof = 0; + if (nid.len > request + offset) + return request; + *eof = 1; + return nid.len - offset; +} + +static int drm_gem_object_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("%d objects\n", atomic_read (&dev->object_count)); + DRM_PROC_PRINT("%d object bytes\n", atomic_read (&dev->object_memory)); + DRM_PROC_PRINT("%d pinned\n", atomic_read (&dev->pin_count)); + DRM_PROC_PRINT("%d pin bytes\n", atomic_read (&dev->pin_memory)); + DRM_PROC_PRINT("%d gtt bytes\n", atomic_read (&dev->gtt_memory)); + DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + #if DRM_DEBUG_CODE static int drm__vma_info(char *buf, char **start, off_t offset, int request, diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index 8421a939..7c16f685 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -88,34 +88,34 @@ EXPORT_SYMBOL(drm_sman_init); static void *drm_sman_mm_allocate(void *private, unsigned long size, unsigned alignment) { - struct drm_mm *mm = (struct drm_mm *) private; - struct drm_mm_node *tmp; + struct drm_memrange *mm = (struct drm_memrange *) private; + struct drm_memrange_node *tmp; - tmp = drm_mm_search_free(mm, size, alignment, 1); + tmp = drm_memrange_search_free(mm, size, alignment, 1); if (!tmp) { return NULL; } - tmp = drm_mm_get_block(tmp, size, alignment); + tmp = drm_memrange_get_block(tmp, size, alignment); return tmp; } static void drm_sman_mm_free(void *private, void *ref) { - struct drm_mm_node *node = (struct drm_mm_node *) ref; + struct drm_memrange_node *node = (struct drm_memrange_node *) ref; - drm_mm_put_block(node); + drm_memrange_put_block(node); } static void drm_sman_mm_destroy(void *private) { - struct drm_mm *mm = (struct drm_mm *) private; - drm_mm_takedown(mm); + struct drm_memrange *mm = (struct drm_memrange *) private; + drm_memrange_takedown(mm); drm_free(mm, sizeof(*mm), DRM_MEM_MM); } static unsigned long drm_sman_mm_offset(void *private, void *ref) { - struct drm_mm_node *node = (struct drm_mm_node *) ref; + struct drm_memrange_node *node = (struct drm_memrange_node *) ref; return node->start; } @@ -124,7 +124,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size) { struct drm_sman_mm *sman_mm; - struct drm_mm *mm; + struct drm_memrange *mm; int ret; BUG_ON(manager >= sman->num_managers); @@ -135,7 +135,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager, return -ENOMEM; } sman_mm->private = mm; - ret = drm_mm_init(mm, start, size); + ret = drm_memrange_init(mm, start, size); if (ret) { drm_free(mm, sizeof(*mm), DRM_MEM_MM); diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h index 39a39fef..0299776c 100644 --- a/linux-core/drm_sman.h +++ b/linux-core/drm_sman.h @@ -45,7 +45,7 @@ /* * A class that is an abstration of a simple memory allocator. * The sman implementation provides a default such allocator - * using the drm_mm.c implementation. But the user can replace it. + * using the drm_memrange.c implementation. But the user can replace it. * See the SiS implementation, which may use the SiS FB kernel module * for memory management. 
*/ @@ -116,7 +116,7 @@ extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order); /* - * Initialize a drm_mm.c allocator. Should be called only once for each + * Initialize a drm_memrange.c allocator. Should be called only once for each * manager unless a customized allogator is used. */ diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 45b8f386..c62b901d 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -201,15 +201,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) return -ENOMEM; - if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START, - DRM_FILE_PAGE_OFFSET_SIZE)) { + if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START, + DRM_FILE_PAGE_OFFSET_SIZE)) { drm_ht_remove(&dev->map_hash); return -ENOMEM; } if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) { drm_ht_remove(&dev->map_hash); - drm_mm_takedown(&dev->offset_manager); + drm_memrange_takedown(&dev->offset_manager); return -ENOMEM; } @@ -249,7 +249,16 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, goto error_out_unreg; } + if (driver->driver_features & DRIVER_GEM) { + retcode = drm_gem_init (dev); + if (retcode) { + DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); + goto error_out_unreg; + } + } + drm_fence_manager_init(dev); + return 0; error_out_unreg: @@ -300,6 +309,13 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t DRM_ERROR("DRM: Failed to initialize /proc/dri.\n"); goto err_mem; } + if (dev->driver->proc_init) { + ret = dev->driver->proc_init(new_minor); + if (ret) { + DRM_ERROR("DRM: Driver failed to initialize /proc/dri.\n"); + goto err_mem; + } + } } else new_minor->dev_root = NULL; @@ -316,8 +332,11 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t err_g2: - if (new_minor->type == DRM_MINOR_LEGACY) + if (new_minor->type == DRM_MINOR_LEGACY) { + if (dev->driver->proc_cleanup) + dev->driver->proc_cleanup(new_minor); drm_proc_cleanup(new_minor, drm_proc_root); + } err_mem: kfree(new_minor); err_idr: @@ -389,10 +408,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, return 0; err_g5: - drm_put_minor(&dev->primary); + drm_put_minor(dev, &dev->primary); err_g4: if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_put_minor(&dev->control); + drm_put_minor(dev, &dev->control); err_g3: if (!drm_fb_loaded) pci_disable_device(pdev); @@ -443,13 +462,16 @@ int drm_put_dev(struct drm_device * dev) * last minor released. 
* */ -int drm_put_minor(struct drm_minor **minor_p) +int drm_put_minor(struct drm_device *dev, struct drm_minor **minor_p) { struct drm_minor *minor = *minor_p; DRM_DEBUG("release secondary minor %d\n", minor->index); - if (minor->type == DRM_MINOR_LEGACY) + if (minor->type == DRM_MINOR_LEGACY) { + if (dev->driver->proc_cleanup) + dev->driver->proc_cleanup(minor); drm_proc_cleanup(minor, drm_proc_root); + } drm_sysfs_device_remove(minor); idr_remove(&drm_minors_idr, minor->index); diff --git a/linux-core/dvo_ch7xxx.c b/linux-core/dvo_ch7xxx.c index ebf54bff..77c86395 100644 --- a/linux-core/dvo_ch7xxx.c +++ b/linux-core/dvo_ch7xxx.c @@ -221,7 +221,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, goto out; } - ch7xxx->quiet = FALSE; + ch7xxx->quiet = false; DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n", name, vendor, device); return true; diff --git a/linux-core/dvo_ivch.c b/linux-core/dvo_ivch.c index 3a29ab6a..788b0721 100644 --- a/linux-core/dvo_ivch.c +++ b/linux-core/dvo_ivch.c @@ -265,7 +265,7 @@ static bool ivch_init(struct intel_dvo_device *dvo, dvo->i2c_bus = i2cbus; dvo->i2c_bus->slave_addr = dvo->slave_addr; dvo->dev_priv = priv; - priv->quiet = TRUE; + priv->quiet = true; if (!ivch_read(dvo, VR00, &temp)) goto out; diff --git a/linux-core/dvo_tfp410.c b/linux-core/dvo_tfp410.c index 8e26235c..207fda80 100644 --- a/linux-core/dvo_tfp410.c +++ b/linux-core/dvo_tfp410.c @@ -187,7 +187,7 @@ static bool tfp410_init(struct intel_dvo_device *dvo, dvo->i2c_bus = i2cbus; dvo->i2c_bus->slave_addr = dvo->slave_addr; dvo->dev_priv = tfp; - tfp->quiet = TRUE; + tfp->quiet = true; if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n", @@ -200,7 +200,7 @@ static bool tfp410_init(struct intel_dvo_device *dvo, id, i2cbus->adapter.name, i2cbus->slave_addr); goto out; } - tfp->quiet = FALSE; + tfp->quiet = false; return true; out: kfree(tfp); diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index f755dcd4..33a33e61 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -48,11 +48,11 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); unsigned int i915_rightof = 1; module_param_named(i915_rightof, i915_rightof, int, 0400); -#ifdef I915_HAVE_FENCE +#if defined(I915_HAVE_FENCE) && defined(I915_TTM) extern struct drm_fence_driver i915_fence_driver; #endif -#ifdef I915_HAVE_BUFFER +#if defined(I915_HAVE_BUFFER) && defined(I915_TTM) static uint32_t i915_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL}; @@ -71,7 +71,7 @@ static struct drm_bo_driver i915_bo_driver = { .ttm_cache_flush = i915_flush_ttm, .command_stream_barrier = NULL, }; -#endif +#endif /* ttm */ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) { @@ -569,18 +569,22 @@ static int i915_resume(struct drm_device *dev) } static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void remove(struct pci_dev *pdev); + static struct drm_driver driver = { /* don't use mtrr's here, the Xserver or user space app should * deal with them for intel hardware. 
*/ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */ - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, .load = i915_driver_load, .unload = i915_driver_unload, .firstopen = i915_driver_firstopen, + .open = i915_driver_open, .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, + .postclose = i915_driver_postclose, .suspend = i915_suspend, .resume = i915_resume, .device_is_agp = i915_driver_device_is_agp, @@ -596,7 +600,11 @@ static struct drm_driver driver = { .get_reg_ofs = drm_core_get_reg_ofs, .master_create = i915_master_create, .master_destroy = i915_master_destroy, + .proc_init = i915_gem_proc_init, + .proc_cleanup = i915_gem_proc_cleanup, .ioctls = i915_ioctls, + .gem_init_object = i915_gem_init_object, + .gem_free_object = i915_gem_free_object, .fops = { .owner = THIS_MODULE, .open = drm_open, @@ -613,12 +621,12 @@ static struct drm_driver driver = { .name = DRIVER_NAME, .id_table = pciidlist, .probe = probe, - .remove = __devexit_p(drm_cleanup_pci), + .remove = remove, }, -#ifdef I915_HAVE_FENCE +#if defined(I915_HAVE_FENCE) && defined(I915_TTM) .fence_driver = &i915_fence_driver, #endif -#ifdef I915_HAVE_BUFFER +#if defined(I915_HAVE_BUFFER) && defined(I915_TTM) .bo_driver = &i915_bo_driver, #endif .name = DRIVER_NAME, @@ -631,7 +639,28 @@ static struct drm_driver driver = { static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - return drm_get_dev(pdev, ent, &driver); + int ret; + + /* On the 945G/GM, the chipset reports the MSI capability on the + * integrated graphics even though the support isn't actually there + * according to the published specs. It doesn't appear to function + * correctly in testing on 945G. + * This may be a side effect of MSI having been made available for PEG + * and the registers being closely associated. + */ + if (pdev->device != 0x2772 && pdev->device != 0x27A2) + (void )pci_enable_msi(pdev); + + ret = drm_get_dev(pdev, ent, &driver); + if (ret && pdev->msi_enabled) + pci_disable_msi(pdev); + return ret; +} +static void remove(struct pci_dev *pdev) +{ + drm_cleanup_pci(pdev); + if (pdev->msi_enabled) + pci_disable_msi(pdev); } static int __init i915_init(void) diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c new file mode 100644 index 00000000..63f4b91d --- /dev/null +++ b/linux-core/i915_gem.c @@ -0,0 +1,2710 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * + */ + +#include "drmP.h" +#include "drm.h" +#include "drm_compat.h" +#include "i915_drm.h" +#include "i915_drv.h" + +#define WATCH_COHERENCY 0 +#define WATCH_BUF 0 +#define WATCH_EXEC 0 +#define WATCH_LRU 0 +#define WATCH_RELOC 0 +#define WATCH_INACTIVE 0 +#define WATCH_PWRITE 0 + +#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE +static void +i915_gem_dump_object(struct drm_gem_object *obj, int len, + const char *where, uint32_t mark); +#endif + +static int +i915_gem_object_set_domain(struct drm_gem_object *obj, + uint32_t read_domains, + uint32_t write_domain); +int +i915_gem_set_domain(struct drm_gem_object *obj, + struct drm_file *file_priv, + uint32_t read_domains, + uint32_t write_domain); + +static void +i915_gem_clflush_object(struct drm_gem_object *obj); + +int i915_gem_do_init(struct drm_device *dev, unsigned long start, + unsigned long end) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (start >= end || + (start & (PAGE_SIZE - 1)) != 0 || + (end & (PAGE_SIZE - 1)) != 0) { + return -EINVAL; + } + + drm_memrange_init(&dev_priv->mm.gtt_space, start, + end - start); + + dev->gtt_total = (uint32_t) (end - start); + + return 0; +} + +int +i915_gem_init_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_init *args = data; + int ret; + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + + +/** + * Creates a new mm object and returns a handle to it. + */ +int +i915_gem_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_create *args = data; + struct drm_gem_object *obj; + int handle, ret; + + args->size = roundup(args->size, PAGE_SIZE); + + /* Allocate the new object */ + obj = drm_gem_object_alloc(dev, args->size); + if (obj == NULL) + return -ENOMEM; + + ret = drm_gem_handle_create(file_priv, obj, &handle); + mutex_lock(&dev->struct_mutex); + drm_gem_object_handle_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + if (ret) + return ret; + + args->handle = handle; + + return 0; +} + +/** + * Reads data from the object referenced by handle. + * + * On error, the contents of *data are undefined. 
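To make the offset arithmetic in the GTT pwrite path further below concrete (an illustrative example, assuming PAGE_SIZE is 4096): for an object bound at gtt_offset 0x3000, a write of 0x100 bytes at object offset 0xf80 starts at aperture offset 0x3f80 and is split into two copies:

	pass 1:  i = 0x3f80 >> 12 = 3,  o = 0xf80,  l = 0x1000 - 0xf80 = 0x80
	pass 2:  i = 0x4000 >> 12 = 4,  o = 0x000,  l = remaining 0x80

Here i selects the aperture page (added to dev->agp->base >> PAGE_SHIFT to form the pfn that gets mapped), o is the offset within that page, and l is clamped so that no single copy crosses a page boundary.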
+ */ +int +i915_gem_pread_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_pread *args = data; + struct drm_gem_object *obj; + ssize_t read; + loff_t offset; + int ret; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EINVAL; + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_set_domain(obj, file_priv, + I915_GEM_DOMAIN_CPU, 0); + if (ret) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + offset = args->offset; + + read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, + args->size, &offset); + if (read != args->size) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + if (read < 0) + return read; + else + return -EINVAL; + } + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +#include "drm_compat.h" + +/** + * Writes data to the object referenced by handle. + * + * On error, the contents of the buffer that were to be modified are undefined. + */ +int +i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_pwrite *args = data; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + ssize_t remain; + loff_t offset; + char __user *user_data; + char *vaddr; + int i, o, l; + int ret = 0; + unsigned long pfn; + unsigned long unwritten; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EINVAL; + + /** Bounds check destination. + * + * XXX: This could use review for overflow issues... + */ + if (args->offset > obj->size || args->size > obj->size || + args->offset + args->size > obj->size) + return -EFAULT; + + user_data = (char __user *) (uintptr_t) args->data_ptr; + remain = args->size; + if (!access_ok(VERIFY_READ, user_data, remain)) + return -EFAULT; + + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_object_pin(obj, 0); + if (ret) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + ret = i915_gem_set_domain(obj, file_priv, + I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); + if (ret) + goto fail; + + obj_priv = obj->driver_private; + offset = obj_priv->gtt_offset + args->offset; + obj_priv->dirty = 1; + + while (remain > 0) { + + /** Operation in this page + * + * i = page number + * o = offset within page + * l = bytes to copy + */ + i = offset >> PAGE_SHIFT; + o = offset & (PAGE_SIZE-1); + l = remain; + if ((o + l) > PAGE_SIZE) + l = PAGE_SIZE - o; + + pfn = (dev->agp->base >> PAGE_SHIFT) + i; + +#ifdef DRM_KMAP_ATOMIC_PROT_PFN + /* kmap_atomic can't map IO pages on non-HIGHMEM kernels + */ + vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0, + __pgprot(__PAGE_KERNEL)); +#if WATCH_PWRITE + DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n", + i, o, l, pfn, vaddr); +#endif + unwritten = __copy_from_user_inatomic_nocache(vaddr + o, user_data, l); + kunmap_atomic(vaddr, KM_USER0); + + if (unwritten) +#endif + { + vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); +#if WATCH_PWRITE + DRM_INFO("pwrite slow i %d o %d l %d pfn %ld vaddr %p\n", + i, o, l, pfn, vaddr); +#endif + if (vaddr == NULL) { + ret = -EFAULT; + goto fail; + } + unwritten = __copy_from_user(vaddr + o, user_data, l); +#if WATCH_PWRITE + DRM_INFO("unwritten %ld\n", unwritten); +#endif + iounmap(vaddr); + if (unwritten) { + ret = -EFAULT; + goto fail; + } + } + + remain -= l; + user_data += l; + offset += l; + } +#if WATCH_PWRITE && 1 + i915_gem_clflush_object(obj); + 
i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0); + i915_gem_clflush_object(obj); +#endif + +fail: + i915_gem_object_unpin (obj); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + +#if WATCH_PWRITE + if (ret) + DRM_INFO("pwrite failed %d\n", ret); +#endif + return ret; +} + +/** + * Called when user space prepares to use an object + */ +int +i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_set_domain *args = data; + struct drm_gem_object *obj; + int ret; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EINVAL; + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_set_domain(obj, file_priv, + args->read_domains, args->write_domain); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/** + * Called when user space has done writes to this buffer + */ +int +i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_sw_finish *args = data; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret = 0; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + mutex_lock(&dev->struct_mutex); + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + +#if WATCH_BUF + DRM_INFO("%s: sw_finish %d (%p)\n", + __func__, args->handle, obj); +#endif + obj_priv = obj->driver_private; + + /** Pinned buffers may be scanout, so flush the cache + */ + if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { + i915_gem_clflush_object(obj); + drm_agp_chipset_flush(dev); + } + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +/** + * Maps the contents of an object, returning the address it is mapped + * into. + * + * While the mapping holds a reference on the contents of the object, it doesn't + * imply a ref on the object itself. 
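 *
 * The mapping is created against obj->filp, i.e. the object's shmem
 * backing pages with their normal cacheable attributes, not against the
 * GTT aperture. CPU access through it is therefore expected to be
 * bracketed by the set_domain/sw_finish ioctls above, so that the kernel
 * can flush caches (i915_gem_clflush_object(), drm_agp_chipset_flush())
 * at the right times.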
+ */ +int +i915_gem_mmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_mmap *args = data; + struct drm_gem_object *obj; + loff_t offset; + unsigned long addr; + + if (!(dev->driver->driver_features & DRIVER_GEM)) + return -ENODEV; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EINVAL; + + offset = args->offset; + + down_write(¤t->mm->mmap_sem); + addr = do_mmap(obj->filp, 0, args->size, + PROT_READ | PROT_WRITE, MAP_SHARED, + args->offset); + up_write(¤t->mm->mmap_sem); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + if (IS_ERR((void *)addr)) + return addr; + + args->addr_ptr = (uint64_t) addr; + + return 0; +} + +static void +i915_gem_object_free_page_list(struct drm_gem_object *obj) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page_count = obj->size / PAGE_SIZE; + int i; + + if (obj_priv->page_list == NULL) + return; + + + for (i = 0; i < page_count; i++) + if (obj_priv->page_list[i] != NULL) { + if (obj_priv->dirty) + set_page_dirty(obj_priv->page_list[i]); + mark_page_accessed(obj_priv->page_list[i]); + page_cache_release(obj_priv->page_list[i]); + } + obj_priv->dirty = 0; + + drm_free(obj_priv->page_list, + page_count * sizeof(struct page *), + DRM_MEM_DRIVER); + obj_priv->page_list = NULL; +} + +static void +i915_gem_object_move_to_active(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + /* Add a reference if we're newly entering the active list. */ + if (!obj_priv->active) { + drm_gem_object_reference(obj); + obj_priv->active = 1; + } + /* Move from whatever list we were on to the tail of execution. */ + list_move_tail(&obj_priv->list, + &dev_priv->mm.active_list); +} + +#if WATCH_INACTIVE +static void +i915_verify_inactive(struct drm_device *dev, char *file, int line) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + obj = obj_priv->obj; + if (obj_priv->pin_count || obj_priv->active || (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) + DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", + obj, + obj_priv->pin_count, obj_priv->active, obj->write_domain, file, line); + } +} +#else +#define i915_verify_inactive(dev,file,line) +#endif + +static void +i915_gem_object_move_to_inactive(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + i915_verify_inactive(dev, __FILE__, __LINE__); + if (obj_priv->pin_count != 0) + list_del_init(&obj_priv->list); + else + list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + + if (obj_priv->active) { + obj_priv->active = 0; + drm_gem_object_unreference(obj); + } + i915_verify_inactive(dev, __FILE__, __LINE__); +} + +/** + * Creates a new sequence number, emitting a write of it to the status page + * plus an interrupt, which will trigger i915_user_interrupt_handler. + * + * Must be called with struct_lock held. + * + * Returned sequence numbers are nonzero on success. 
+ */ +static uint32_t +i915_add_request(struct drm_device *dev, uint32_t flush_domains) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_request *request; + uint32_t seqno; + int was_empty; + RING_LOCALS; + + request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); + if (request == NULL) + return 0; + + /* Grab the seqno we're going to make this request be, and bump the + * next (skipping 0 so it can be the reserved no-seqno value). + */ + seqno = dev_priv->mm.next_gem_seqno; + dev_priv->mm.next_gem_seqno++; + if (dev_priv->mm.next_gem_seqno == 0) + dev_priv->mm.next_gem_seqno++; + + BEGIN_LP_RING(4); + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(seqno); + + OUT_RING(GFX_OP_USER_INTERRUPT); + ADVANCE_LP_RING(); + + DRM_DEBUG("%d\n", seqno); + + request->seqno = seqno; + request->emitted_jiffies = jiffies; + request->flush_domains = flush_domains; + was_empty = list_empty(&dev_priv->mm.request_list); + list_add_tail(&request->list, &dev_priv->mm.request_list); + + if (was_empty) + schedule_delayed_work (&dev_priv->mm.retire_work, HZ); + return seqno; +} + +/** + * Command execution barrier + * + * Ensures that all commands in the ring are finished + * before signalling the CPU + */ + +uint32_t +i915_retire_commands(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; + uint32_t flush_domains = 0; + RING_LOCALS; + + /* The sampler always gets flushed on i965 (sigh) */ + if (IS_I965G(dev)) + flush_domains |= I915_GEM_DOMAIN_SAMPLER; + BEGIN_LP_RING(2); + OUT_RING(cmd); + OUT_RING(0); /* noop */ + ADVANCE_LP_RING(); + return flush_domains; +} + +/** + * Moves buffers associated only with the given active seqno from the active + * to inactive list, potentially freeing them. + */ +static void +i915_gem_retire_request(struct drm_device *dev, + struct drm_i915_gem_request *request) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (request->flush_domains != 0) { + struct drm_i915_gem_object *obj_priv, *next; + + /* First clear any buffers that were only waiting for a flush + * matching the one just retired. + */ + + list_for_each_entry_safe(obj_priv, next, + &dev_priv->mm.flushing_list, list) { + struct drm_gem_object *obj = obj_priv->obj; + + if (obj->write_domain & request->flush_domains) { + obj->write_domain = 0; + i915_gem_object_move_to_inactive(obj); + } + } + + } + + /* Move any buffers on the active list that are no longer referenced + * by the ringbuffer to the flushing/inactive lists as appropriate. + */ + while (!list_empty(&dev_priv->mm.active_list)) { + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + obj_priv = list_first_entry(&dev_priv->mm.active_list, + struct drm_i915_gem_object, + list); + obj = obj_priv->obj; + + /* If the seqno being retired doesn't match the oldest in the + * list, then the oldest in the list must still be newer than + * this seqno. + */ + if (obj_priv->last_rendering_seqno != request->seqno) + return; +#if WATCH_LRU + DRM_INFO("%s: retire %d moves to inactive list %p\n", + __func__, request->seqno, obj); +#endif + + if (obj->write_domain != 0) { + list_move_tail(&obj_priv->list, + &dev_priv->mm.flushing_list); + } else { + i915_gem_object_move_to_inactive(obj); + } + } +} + +/** + * Returns true if seq1 is later than seq2. 
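As a self-contained illustration (not part of the patch) of why the comparison below survives wraparound of the 32-bit sequence counter, the same test can be exercised in isolation:

#include <assert.h>
#include <stdint.h>

/* Same trick as i915_seqno_passed(): compare modulo 2^32 by looking at
 * the sign of the unsigned difference reinterpreted as a signed value.
 */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));		/* later in the normal case */
	assert(!seqno_passed(3, 5));
	assert(seqno_passed(2, 0xfffffffeu));	/* still later, across the wrap */
	return 0;
}

This stays correct as long as the window of outstanding requests never spans 2^31 or more sequence numbers.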
+ */ +static int +i915_seqno_passed(uint32_t seq1, uint32_t seq2) +{ + return (int32_t)(seq1 - seq2) >= 0; +} + +uint32_t +i915_get_gem_seqno(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); +} + +/** + * This function clears the request list as sequence numbers are passed. + */ +void +i915_gem_retire_requests(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t seqno; + + seqno = i915_get_gem_seqno(dev); + + while (!list_empty(&dev_priv->mm.request_list)) { + struct drm_i915_gem_request *request; + uint32_t retiring_seqno; + + request = list_first_entry(&dev_priv->mm.request_list, + struct drm_i915_gem_request, + list); + retiring_seqno = request->seqno; + + if (i915_seqno_passed(seqno, retiring_seqno) || dev_priv->mm.wedged) { + i915_gem_retire_request(dev, request); + + list_del(&request->list); + drm_free(request, sizeof(*request), DRM_MEM_DRIVER); + } else + break; + } +} + +void +i915_gem_retire_work_handler(struct work_struct *work) +{ + struct drm_i915_private *dev_priv; + struct drm_device *dev; + + dev_priv = container_of(work, struct drm_i915_private, + mm.retire_work.work); + dev = dev_priv->dev; + + mutex_lock(&dev->struct_mutex); + i915_gem_retire_requests(dev); + if (!list_empty(&dev_priv->mm.request_list)) + schedule_delayed_work (&dev_priv->mm.retire_work, HZ); + mutex_unlock(&dev->struct_mutex); +} + +/** + * Waits for a sequence number to be signaled, and cleans up the + * request and object lists appropriately for that event. + */ +int +i915_wait_request(struct drm_device *dev, uint32_t seqno) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = 0; + + BUG_ON(seqno == 0); + + if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { + dev_priv->mm.waiting_gem_seqno = seqno; + i915_user_irq_on(dev); + ret = wait_event_interruptible(dev_priv->irq_queue, + i915_seqno_passed(i915_get_gem_seqno(dev), + seqno) || dev_priv->mm.wedged); + i915_user_irq_off(dev); + dev_priv->mm.waiting_gem_seqno = 0; + } + if (dev_priv->mm.wedged) + ret = -EIO; + + if (ret) + DRM_ERROR("%s returns %d (awaiting %d at %d)\n", + __func__, ret, seqno, i915_get_gem_seqno(dev)); + + /* Directly dispatch request retiring. While we have the work queue + * to handle this, the waiter on a request often wants an associated + * buffer to have made it to the inactive list, and we would need + * a separate wait queue to handle that. + */ + if (ret == 0) + i915_gem_retire_requests(dev); + + return ret; +} + +static void +i915_gem_flush(struct drm_device *dev, + uint32_t invalidate_domains, + uint32_t flush_domains) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t cmd; + RING_LOCALS; + +#if WATCH_EXEC + DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, + invalidate_domains, flush_domains); +#endif + + if (flush_domains & I915_GEM_DOMAIN_CPU) + drm_agp_chipset_flush(dev); + + if ((invalidate_domains|flush_domains) & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { + /* + * read/write caches: + * + * I915_GEM_DOMAIN_RENDER is always invalidated, but is + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is + * also flushed at 2d versus 3d pipeline switches. + * + * read-only caches: + * + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if + * MI_READ_FLUSH is set, and is always flushed on 965. + * + * I915_GEM_DOMAIN_COMMAND may not exist? + * + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is + * invalidated when MI_EXE_FLUSH is set. 
+ * + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is + * invalidated with every MI_FLUSH. + * + * TLBs: + * + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND + * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER + * are flushed at any MI_FLUSH. + */ + + cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; + if ((invalidate_domains|flush_domains) & + I915_GEM_DOMAIN_RENDER) + cmd &= ~MI_NO_WRITE_FLUSH; + if (!IS_I965G(dev)) { + /* + * On the 965, the sampler cache always gets flushed + * and this bit is reserved. + */ + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) + cmd |= MI_READ_FLUSH; + } + if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) + cmd |= MI_EXE_FLUSH; + +#if WATCH_EXEC + DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); +#endif + BEGIN_LP_RING(2); + OUT_RING(cmd); + OUT_RING(0); /* noop */ + ADVANCE_LP_RING(); + } +} + +/** + * Ensures that all rendering to the object has completed and the object is + * safe to unbind from the GTT or access from the CPU. + */ +static int +i915_gem_object_wait_rendering(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int ret; + + /* If there are writes queued to the buffer, flush and + * create a new seqno to wait for. + */ + if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { + uint32_t write_domain = obj->write_domain; +#if WATCH_BUF + DRM_INFO("%s: flushing object %p from write domain %08x\n", + __func__, obj, write_domain); +#endif + i915_gem_flush(dev, 0, write_domain); + obj->write_domain = 0; + + i915_gem_object_move_to_active(obj); + obj_priv->last_rendering_seqno = i915_add_request(dev, + write_domain); + BUG_ON(obj_priv->last_rendering_seqno == 0); +#if WATCH_LRU + DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj); +#endif + } + /* If there is rendering queued on the buffer being evicted, wait for + * it. + */ + if (obj_priv->active) { +#if WATCH_BUF + DRM_INFO("%s: object %p wait for seqno %08x\n", + __func__, obj, obj_priv->last_rendering_seqno); +#endif + ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); + if (ret != 0) + return ret; + } + + return 0; +} + +/** + * Unbinds an object from the GTT aperture. + */ +static int +i915_gem_object_unbind(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int ret = 0; + +#if WATCH_BUF + DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); + DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); +#endif + if (obj_priv->gtt_space == NULL) + return 0; + + if (obj_priv->pin_count != 0) { + DRM_ERROR("Attempting to unbind pinned buffer\n"); + return -EINVAL; + } + + /* Wait for any rendering to complete + */ + ret = i915_gem_object_wait_rendering(obj); + if (ret) { + DRM_ERROR ("wait_rendering failed: %d\n", ret); + return ret; + } + + /* Move the object to the CPU domain to ensure that + * any possible CPU writes while it's not in the GTT + * are flushed when we go to remap it. This will + * also ensure that all pending GPU writes are finished + * before we unbind. 
+ */ + ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, + I915_GEM_DOMAIN_CPU); + if (ret) { + DRM_ERROR("set_domain failed: %d\n", ret); + return ret; + } + + if (obj_priv->agp_mem != NULL) { + drm_unbind_agp(obj_priv->agp_mem); + drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); + obj_priv->agp_mem = NULL; + } + + BUG_ON(obj_priv->active); + + i915_gem_object_free_page_list(obj); + + if (obj_priv->gtt_space) { + atomic_dec(&dev->gtt_count); + atomic_sub(obj->size, &dev->gtt_memory); + + drm_memrange_put_block(obj_priv->gtt_space); + obj_priv->gtt_space = NULL; + } + + /* Remove ourselves from the LRU list if present. */ + if (!list_empty(&obj_priv->list)) + list_del_init(&obj_priv->list); + + return 0; +} + +#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE +static void +i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, + uint32_t bias, uint32_t mark) +{ + uint32_t *mem = kmap_atomic(page, KM_USER0); + int i; + for (i = start; i < end; i += 4) + DRM_INFO("%08x: %08x%s\n", + (int) (bias + i), mem[i / 4], + (bias + i == mark) ? " ********" : ""); + kunmap_atomic(mem, KM_USER0); + /* give syslog time to catch up */ + msleep(1); +} + +static void +i915_gem_dump_object(struct drm_gem_object *obj, int len, + const char *where, uint32_t mark) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page; + + DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); + for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { + int page_len, chunk, chunk_len; + + page_len = len - page * PAGE_SIZE; + if (page_len > PAGE_SIZE) + page_len = PAGE_SIZE; + + for (chunk = 0; chunk < page_len; chunk += 128) { + chunk_len = page_len - chunk; + if (chunk_len > 128) + chunk_len = 128; + i915_gem_dump_page(obj_priv->page_list[page], + chunk, chunk + chunk_len, + obj_priv->gtt_offset + + page * PAGE_SIZE, + mark); + } + } +} +#endif + +#if WATCH_LRU +static void +i915_dump_lru(struct drm_device *dev, const char *where) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + DRM_INFO("active list %s {\n", where); + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, + list) + { + DRM_INFO(" %p: %08x\n", obj_priv, + obj_priv->last_rendering_seqno); + } + DRM_INFO("}\n"); + DRM_INFO("flushing list %s {\n", where); + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, + list) + { + DRM_INFO(" %p: %08x\n", obj_priv, + obj_priv->last_rendering_seqno); + } + DRM_INFO("}\n"); + DRM_INFO("inactive %s {\n", where); + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + DRM_INFO(" %p: %08x\n", obj_priv, + obj_priv->last_rendering_seqno); + } + DRM_INFO("}\n"); +} +#endif + +static int +i915_gem_evict_something(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret = 0; + + for (;;) { + /* If there's an inactive buffer available now, grab it + * and be done. + */ + if (!list_empty(&dev_priv->mm.inactive_list)) { + obj_priv = list_first_entry(&dev_priv->mm.inactive_list, + struct drm_i915_gem_object, + list); + obj = obj_priv->obj; + BUG_ON(obj_priv->pin_count != 0); +#if WATCH_LRU + DRM_INFO("%s: evicting %p\n", __func__, obj); +#endif + BUG_ON(obj_priv->active); + + /* Wait on the rendering and unbind the buffer. 
*/ + ret = i915_gem_object_unbind(obj); + break; + } + + /* If we didn't get anything, but the ring is still processing + * things, wait for one of those things to finish and hopefully + * leave us a buffer to evict. + */ + if (!list_empty(&dev_priv->mm.request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&dev_priv->mm.request_list, + struct drm_i915_gem_request, + list); + + ret = i915_wait_request(dev, request->seqno); + if (ret) + break; + + /* if waiting caused an object to become inactive, + * then loop around and wait for it. Otherwise, we + * assume that waiting freed and unbound something, + * so there should now be some space in the GTT + */ + if (!list_empty(&dev_priv->mm.inactive_list)) + continue; + break; + } + + /* If we didn't have anything on the request list but there + * are buffers awaiting a flush, emit one and try again. + * When we wait on it, those buffers waiting for that flush + * will get moved to inactive. + */ + if (!list_empty(&dev_priv->mm.flushing_list)) { + obj_priv = list_first_entry(&dev_priv->mm.flushing_list, + struct drm_i915_gem_object, + list); + obj = obj_priv->obj; + + i915_gem_flush(dev, + obj->write_domain, + obj->write_domain); + i915_add_request(dev, obj->write_domain); + + obj = NULL; + continue; + } + + DRM_ERROR("inactive empty %d request empty %d flushing empty %d\n", + list_empty(&dev_priv->mm.inactive_list), + list_empty(&dev_priv->mm.request_list), + list_empty(&dev_priv->mm.flushing_list)); + /* If we didn't do any of the above, there's nothing to be done + * and we just can't fit it in. + */ + return -ENOMEM; + } + return ret; +} + +static int +i915_gem_object_get_page_list(struct drm_gem_object *obj) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page_count, i; + struct address_space *mapping; + struct inode *inode; + struct page *page; + int ret; + + if (obj_priv->page_list) + return 0; + + /* Get the list of pages out of our struct file. They'll be pinned + * at this point until we release them. + */ + page_count = obj->size / PAGE_SIZE; + BUG_ON(obj_priv->page_list != NULL); + obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), + DRM_MEM_DRIVER); + if (obj_priv->page_list == NULL) { + DRM_ERROR("Faled to allocate page list\n"); + return -ENOMEM; + } + + inode = obj->filp->f_path.dentry->d_inode; + mapping = inode->i_mapping; + for (i = 0; i < page_count; i++) { + page = find_get_page(mapping, i); + if (page == NULL || !PageUptodate(page)) { + if (page) { + page_cache_release(page); + page = NULL; + } + ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL); + + if (ret) { + DRM_ERROR("shmem_getpage failed: %d\n", ret); + i915_gem_object_free_page_list(obj); + return ret; + } + unlock_page(page); + } + obj_priv->page_list[i] = page; + } + return 0; +} + +/** + * Finds free space in the GTT aperture and binds the object there. 
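+ *
+ * Roughly: drm_memrange_search_free()/drm_memrange_get_block() pick a
+ * node in mm.gtt_space; if that fails, i915_gem_evict_something() is
+ * run and the search is retried; finally the shmem page list is
+ * fetched and drm_agp_bind_pages() maps it at the node's start offset.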
+ */ +static int +i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + struct drm_memrange_node *free_space; + int page_count, ret; + + if (alignment == 0) + alignment = PAGE_SIZE; + if (alignment & (PAGE_SIZE - 1)) { + DRM_ERROR("Invalid object alignment requested %u\n", alignment); + return -EINVAL; + } + + search_free: + free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space, + obj->size, + alignment, 0); + if (free_space != NULL) { + obj_priv->gtt_space = + drm_memrange_get_block(free_space, obj->size, + alignment); + if (obj_priv->gtt_space != NULL) { + obj_priv->gtt_space->private = obj; + obj_priv->gtt_offset = obj_priv->gtt_space->start; + } + } + if (obj_priv->gtt_space == NULL) { + /* If the gtt is empty and we're still having trouble + * fitting our object in, we're out of memory. + */ +#if WATCH_LRU + DRM_INFO("%s: GTT full, evicting something\n", __func__); +#endif + if (list_empty(&dev_priv->mm.inactive_list) && + list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->mm.active_list)) { + DRM_ERROR("GTT full, but LRU list empty\n"); + return -ENOMEM; + } + + ret = i915_gem_evict_something(dev); + if (ret != 0) { + DRM_ERROR("Failed to evict a buffer %d\n", ret); + return ret; + } + goto search_free; + } + +#if WATCH_BUF + DRM_INFO("Binding object of size %d at 0x%08x\n", + obj->size, obj_priv->gtt_offset); +#endif + ret = i915_gem_object_get_page_list(obj); + if (ret) { + drm_memrange_put_block(obj_priv->gtt_space); + obj_priv->gtt_space = NULL; + return ret; + } + + page_count = obj->size / PAGE_SIZE; + /* Create an AGP memory structure pointing at our pages, and bind it + * into the GTT. + */ + obj_priv->agp_mem = drm_agp_bind_pages(dev, + obj_priv->page_list, + page_count, + obj_priv->gtt_offset); + if (obj_priv->agp_mem == NULL) { + i915_gem_object_free_page_list(obj); + drm_memrange_put_block(obj_priv->gtt_space); + obj_priv->gtt_space = NULL; + return -ENOMEM; + } + atomic_inc(&dev->gtt_count); + atomic_add(obj->size, &dev->gtt_memory); + + /* Assert that the object is not currently in any GPU domain. As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); + BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); + + return 0; +} + +static void +i915_gem_clflush_object(struct drm_gem_object *obj) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + /* If we don't have a page list set up, then we're not pinned + * to GPU, and we can ignore the cache flush because it'll happen + * again at bind time. + */ + if (obj_priv->page_list == NULL) + return; + + drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE); +} + +/* + * Set the next domain for the specified object. This + * may not actually perform the necessary flushing/invaliding though, + * as that may want to be batched with other set_domain operations + * + * This is (we hope) the only really tricky part of gem. The goal + * is fairly simple -- track which caches hold bits of the object + * and make sure they remain coherent. A few concrete examples may + * help to explain how it works. For shorthand, we use the notation + * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the + * a pair of read and write domain masks. 
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped to GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_gem_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU, CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ * 6. Read/written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. 
Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t invalidate_domains = 0;
+ uint32_t flush_domains = 0;
+ int ret;
+
+#if WATCH_BUF
+ DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+ __func__, obj,
+ obj->read_domains, read_domains,
+ obj->write_domain, write_domain);
+#endif
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (write_domain == 0)
+ read_domains |= obj->read_domains;
+ else
+ obj_priv->dirty = 1;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->write_domain && obj->write_domain != read_domains) {
+ flush_domains |= obj->write_domain;
+ invalidate_domains |= read_domains & ~obj->write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= read_domains & ~obj->read_domains;
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
+#if WATCH_BUF
+ DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+ __func__, flush_domains, invalidate_domains);
+#endif
+ /*
+ * If we're invalidating the CPU cache and flushing a GPU cache,
+ * then pause for rendering so that the GPU caches will be
+ * flushed before the CPU cache is invalidated
+ */
+ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
+ (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
+ i915_gem_clflush_object(obj);
+ }
+
+ if ((write_domain | flush_domains) != 0)
+ obj->write_domain = write_domain;
+ obj->read_domains = read_domains;
+ dev->invalidate_domains |= invalidate_domains;
+ dev->flush_domains |= flush_domains;
+#if WATCH_BUF
+ DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
+ __func__,
+ obj->read_domains, obj->write_domain,
+ dev->invalidate_domains, dev->flush_domains);
+#endif
+ return 0;
+}
+
+/**
+ * Once all of the objects have been set in the proper domain,
+ * perform the necessary flush and invalidate operations.
+ *
+ * Returns the write domains flushed, for use in flush tracking. 
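+ *
+ * For example, i915_gem_execbuffer() calls i915_gem_object_set_domain()
+ * on each buffer to accumulate dev->invalidate_domains and
+ * dev->flush_domains, calls this function once to emit the combined
+ * flush, and then hands the returned mask to i915_add_request() so the
+ * flush is tied to that request's seqno.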
+ */ +static uint32_t +i915_gem_dev_set_domain(struct drm_device *dev) +{ + uint32_t flush_domains = dev->flush_domains; + + /* + * Now that all the buffers are synced to the proper domains, + * flush and invalidate the collected domains + */ + if (dev->invalidate_domains | dev->flush_domains) { +#if WATCH_EXEC + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", + __func__, + dev->invalidate_domains, + dev->flush_domains); +#endif + i915_gem_flush(dev, + dev->invalidate_domains, + dev->flush_domains); + dev->invalidate_domains = 0; + dev->flush_domains = 0; + } + + return flush_domains; +} + +#if WATCH_COHERENCY +static void +i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page; + uint32_t *gtt_mapping; + uint32_t *backing_map = NULL; + int bad_count = 0; + + DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", + __func__, obj, obj_priv->gtt_offset, handle, + obj->size / 1024); + + gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, + obj->size); + if (gtt_mapping == NULL) { + DRM_ERROR("failed to map GTT space\n"); + return; + } + + for (page = 0; page < obj->size / PAGE_SIZE; page++) { + int i; + + backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); + + if (backing_map == NULL) { + DRM_ERROR("failed to map backing page\n"); + goto out; + } + + for (i = 0; i < PAGE_SIZE / 4; i++) { + uint32_t cpuval = backing_map[i]; + uint32_t gttval = readl(gtt_mapping + + page * 1024 + i); + + if (cpuval != gttval) { + DRM_INFO("incoherent CPU vs GPU at 0x%08x: " + "0x%08x vs 0x%08x\n", + (int)(obj_priv->gtt_offset + + page * PAGE_SIZE + i * 4), + cpuval, gttval); + if (bad_count++ >= 8) { + DRM_INFO("...\n"); + goto out; + } + } + } + kunmap_atomic(backing_map, KM_USER0); + backing_map = NULL; + } + + out: + if (backing_map != NULL) + kunmap_atomic(backing_map, KM_USER0); + iounmap(gtt_mapping); + + /* give syslog time to catch up */ + msleep(1); + + /* Directly flush the object, since we just loaded values with the CPU + * from thebacking pages and we don't want to disturb the cache + * management that we're trying to observe. + */ + + i915_gem_clflush_object(obj); +} +#endif + +/** + * Pin an object to the GTT and evaluate the relocations landing in it. + */ +static int +i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object *entry) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_relocation_entry reloc; + struct drm_i915_gem_relocation_entry __user *relocs; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int i, ret; + uint32_t last_reloc_offset = -1; + void *reloc_page = NULL; + + /* Choose the GTT offset for our buffer and put it there. */ + ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); + if (ret) + return ret; + + entry->offset = obj_priv->gtt_offset; + + relocs = (struct drm_i915_gem_relocation_entry __user *) + (uintptr_t) entry->relocs_ptr; + /* Apply the relocations, using the GTT aperture to avoid cache + * flushing requirements. 
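+ * Each relocation boils down to (roughly):
+ *
+ * *(uint32_t *)(aperture + obj_gtt_offset + reloc.offset) =
+ * target_gtt_offset + reloc.delta;
+ *
+ * with the store going through an ioremap of the GTT aperture
+ * (dev->agp->base) rather than the CPU view of the backing pages.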
+ */ + for (i = 0; i < entry->relocation_count; i++) { + struct drm_gem_object *target_obj; + struct drm_i915_gem_object *target_obj_priv; + uint32_t reloc_val, reloc_offset, *reloc_entry; + int ret; + + ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); + if (ret != 0) { + i915_gem_object_unpin(obj); + return ret; + } + + target_obj = drm_gem_object_lookup(obj->dev, file_priv, + reloc.target_handle); + if (target_obj == NULL) { + i915_gem_object_unpin(obj); + return -EINVAL; + } + target_obj_priv = target_obj->driver_private; + + /* The target buffer should have appeared before us in the + * exec_object list, so it should have a GTT space bound by now. + */ + if (target_obj_priv->gtt_space == NULL) { + DRM_ERROR("No GTT space found for object %d\n", + reloc.target_handle); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return -EINVAL; + } + + if (reloc.offset > obj->size - 4) { + DRM_ERROR("Relocation beyond object bounds: " + "obj %p target %d offset %d size %d.\n", + obj, reloc.target_handle, + (int) reloc.offset, (int) obj->size); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return -EINVAL; + } + if (reloc.offset & 3) { + DRM_ERROR("Relocation not 4-byte aligned: " + "obj %p target %d offset %d.\n", + obj, reloc.target_handle, + (int) reloc.offset); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return -EINVAL; + } + + if (reloc.write_domain && target_obj->pending_write_domain && + reloc.write_domain != target_obj->pending_write_domain) { + DRM_ERROR("Write domain conflict: " + "obj %p target %d offset %d " + "new %08x old %08x\n", + obj, reloc.target_handle, + (int) reloc.offset, + reloc.write_domain, + target_obj->pending_write_domain); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return -EINVAL; + } + +#if WATCH_RELOC + DRM_INFO("%s: obj %p offset %08x target %d " + "read %08x write %08x gtt %08x " + "presumed %08x delta %08x\n", + __func__, + obj, + (int) reloc.offset, + (int) reloc.target_handle, + (int) reloc.read_domains, + (int) reloc.write_domain, + (int) target_obj_priv->gtt_offset, + (int) reloc.presumed_offset, + reloc.delta); +#endif + + target_obj->pending_read_domains |= reloc.read_domains; + target_obj->pending_write_domain |= reloc.write_domain; + + /* If the relocation already has the right value in it, no + * more work needs to be done. + */ + if (target_obj_priv->gtt_offset == reloc.presumed_offset) { + drm_gem_object_unreference(target_obj); + continue; + } + + /* Now that we're going to actually write some data in, + * make sure that any rendering using this buffer's contents + * is completed. + */ + i915_gem_object_wait_rendering(obj); + + /* As we're writing through the gtt, flush + * any CPU writes before we write the relocations + */ + if (obj->write_domain & I915_GEM_DOMAIN_CPU) { + i915_gem_clflush_object(obj); + drm_agp_chipset_flush(dev); + obj->write_domain = 0; + } + + /* Map the page containing the relocation we're going to + * perform. 
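+ * The mapping is cached in reloc_page and reused while consecutive
+ * relocations fall within the same page (tracked via
+ * last_reloc_offset), so only page crossings pay for an
+ * iounmap/ioremap cycle.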
+ */ + reloc_offset = obj_priv->gtt_offset + reloc.offset; + if (reloc_page == NULL || + (last_reloc_offset & ~(PAGE_SIZE - 1)) != + (reloc_offset & ~(PAGE_SIZE - 1))) { + if (reloc_page != NULL) + iounmap(reloc_page); + + reloc_page = ioremap(dev->agp->base + + (reloc_offset & ~(PAGE_SIZE - 1)), + PAGE_SIZE); + last_reloc_offset = reloc_offset; + if (reloc_page == NULL) { + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return -ENOMEM; + } + } + + reloc_entry = (uint32_t *)((char *)reloc_page + + (reloc_offset & (PAGE_SIZE - 1))); + reloc_val = target_obj_priv->gtt_offset + reloc.delta; + +#if WATCH_BUF + DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", + obj, (unsigned int) reloc.offset, + readl(reloc_entry), reloc_val); +#endif + writel(reloc_val, reloc_entry); + + /* Write the updated presumed offset for this entry back out + * to the user. + */ + reloc.presumed_offset = target_obj_priv->gtt_offset; + ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); + if (ret != 0) { + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); + return ret; + } + + drm_gem_object_unreference(target_obj); + } + + if (reloc_page != NULL) + iounmap(reloc_page); + +#if WATCH_BUF + if (0) + i915_gem_dump_object(obj, 128, __func__, ~0); +#endif + return 0; +} + +/** Dispatch a batchbuffer to the ring + */ +static int +i915_dispatch_gem_execbuffer(struct drm_device *dev, + struct drm_i915_gem_execbuffer *exec, + uint64_t exec_offset) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) + (uintptr_t) exec->cliprects_ptr; + int nbox = exec->num_cliprects; + int i = 0, count; + uint32_t exec_start, exec_len; + RING_LOCALS; + + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; + exec_len = (uint32_t) exec->batch_len; + + if ((exec_start | exec_len) & 0x7) { + DRM_ERROR("alignment\n"); + return -EINVAL; + } + + if (!exec_start) + return -EINVAL; + + count = nbox ? nbox : 1; + + for (i = 0; i < count; i++) { + if (i < nbox) { + int ret = i915_emit_box(dev, boxes, i, + exec->DR1, exec->DR4); + if (ret) + return ret; + } + + if (IS_I830(dev) || IS_845G(dev)) { + BEGIN_LP_RING(4); + OUT_RING(MI_BATCH_BUFFER); + OUT_RING(exec_start | MI_BATCH_NON_SECURE); + OUT_RING(exec_start + exec_len - 4); + OUT_RING(0); + ADVANCE_LP_RING(); + } else { + BEGIN_LP_RING(2); + if (IS_I965G(dev)) { + OUT_RING(MI_BATCH_BUFFER_START | + (2 << 6) | + MI_BATCH_NON_SECURE_I965); + OUT_RING(exec_start); + } else { + OUT_RING(MI_BATCH_BUFFER_START | + (2 << 6)); + OUT_RING(exec_start | MI_BATCH_NON_SECURE); + } + ADVANCE_LP_RING(); + } + } + + /* XXX breadcrumb */ + return 0; +} + +/* Throttle our rendering by waiting until the ring has completed our requests + * emitted over 20 msec ago. + * + * This should get us reasonable parallelism between CPU and GPU but also + * relatively low latency when blocking on a particular request to finish. 
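+ *
+ * Concretely, each call waits on the seqno recorded by the previous
+ * throttle call and then saves the most recent seqno for the next one
+ * (last_gem_throttle_seqno / last_gem_seqno below).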
+ */ +static int +i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) +{ + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; + int ret = 0; + uint32_t seqno; + + mutex_lock(&dev->struct_mutex); + seqno = i915_file_priv->mm.last_gem_throttle_seqno; + i915_file_priv->mm.last_gem_throttle_seqno = i915_file_priv->mm.last_gem_seqno; + if (seqno) + ret = i915_wait_request(dev, seqno); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +i915_gem_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; + struct drm_i915_gem_execbuffer *args = data; + struct drm_i915_gem_exec_object *exec_list = NULL; + struct drm_gem_object **object_list = NULL; + struct drm_gem_object *batch_obj; + int ret, i, pinned = 0; + uint64_t exec_offset; + uint32_t seqno, flush_domains; + +#if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); +#endif + + /* Copy in the exec list from userland */ + exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, + DRM_MEM_DRIVER); + object_list = drm_calloc(sizeof(*object_list), args->buffer_count, + DRM_MEM_DRIVER); + if (exec_list == NULL || object_list == NULL) { + DRM_ERROR("Failed to allocate exec or object list " + "for %d buffers\n", + args->buffer_count); + ret = -ENOMEM; + goto pre_mutex_err; + } + ret = copy_from_user(exec_list, + (struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + sizeof(*exec_list) * args->buffer_count); + if (ret != 0) { + DRM_ERROR("copy %d exec entries failed %d\n", + args->buffer_count, ret); + goto pre_mutex_err; + } + + mutex_lock(&dev->struct_mutex); + + i915_verify_inactive(dev, __FILE__, __LINE__); + + if (dev_priv->mm.wedged) { + DRM_ERROR("Execbuf while wedged\n"); + mutex_unlock(&dev->struct_mutex); + return -EIO; + } + + if (dev_priv->mm.suspended) { + DRM_ERROR("Execbuf while VT-switched.\n"); + mutex_unlock(&dev->struct_mutex); + return -EBUSY; + } + + /* Zero the gloabl flush/invalidate flags. These + * will be modified as each object is bound to the + * gtt + */ + dev->invalidate_domains = 0; + dev->flush_domains = 0; + + /* Look up object handles and perform the relocations */ + for (i = 0; i < args->buffer_count; i++) { + object_list[i] = drm_gem_object_lookup(dev, file_priv, + exec_list[i].handle); + if (object_list[i] == NULL) { + DRM_ERROR("Invalid object handle %d at index %d\n", + exec_list[i].handle, i); + ret = -EINVAL; + goto err; + } + + object_list[i]->pending_read_domains = 0; + object_list[i]->pending_write_domain = 0; + ret = i915_gem_object_pin_and_relocate(object_list[i], + file_priv, + &exec_list[i]); + if (ret) { + DRM_ERROR("object bind and relocate failed %d\n", ret); + goto err; + } + pinned = i + 1; + } + + /* Set the pending read domains for the batch buffer to COMMAND */ + batch_obj = object_list[args->buffer_count-1]; + batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; + batch_obj->pending_write_domain = 0; + + i915_verify_inactive(dev, __FILE__, __LINE__); + + for (i = 0; i < args->buffer_count; i++) { + struct drm_gem_object *obj = object_list[i]; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + if (obj_priv->gtt_space == NULL) { + /* We evicted the buffer in the process of validating + * our set of buffers in. 
We could try to recover by + * kicking them everything out and trying again from + * the start. + */ + ret = -ENOMEM; + goto err; + } + + /* make sure all previous memory operations have passed */ + ret = i915_gem_object_set_domain(obj, + obj->pending_read_domains, + obj->pending_write_domain); + if (ret) + goto err; + } + + i915_verify_inactive(dev, __FILE__, __LINE__); + + /* Flush/invalidate caches and chipset buffer */ + flush_domains = i915_gem_dev_set_domain(dev); + + i915_verify_inactive(dev, __FILE__, __LINE__); + +#if WATCH_COHERENCY + for (i = 0; i < args->buffer_count; i++) { + i915_gem_object_check_coherency(object_list[i], + exec_list[i].handle); + } +#endif + + exec_offset = exec_list[args->buffer_count - 1].offset; + +#if WATCH_EXEC + i915_gem_dump_object(object_list[args->buffer_count - 1], + args->batch_len, + __func__, + ~0); +#endif + + /* Exec the batchbuffer */ + ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); + if (ret) { + DRM_ERROR("dispatch failed %d\n", ret); + goto err; + } + + /* + * Ensure that the commands in the batch buffer are + * finished before the interrupt fires + */ + flush_domains |= i915_retire_commands(dev); + + i915_verify_inactive(dev, __FILE__, __LINE__); + + /* + * Get a seqno representing the execution of the current buffer, + * which we can wait on. We would like to mitigate these interrupts, + * likely by only creating seqnos occasionally (so that we have + * *some* interrupts representing completion of buffers that we can + * wait on when trying to clear up gtt space). + */ + seqno = i915_add_request(dev, flush_domains); + BUG_ON(seqno == 0); + i915_file_priv->mm.last_gem_seqno = seqno; + for (i = 0; i < args->buffer_count; i++) { + struct drm_gem_object *obj = object_list[i]; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + i915_gem_object_move_to_active(obj); + obj_priv->last_rendering_seqno = seqno; +#if WATCH_LRU + DRM_INFO("%s: move to exec list %p\n", __func__, obj); +#endif + } +#if WATCH_LRU + i915_dump_lru(dev, __func__); +#endif + + i915_verify_inactive(dev, __FILE__, __LINE__); + + /* Copy the new buffer offsets back to the user's exec list. 
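+ * These offsets become the presumed_offset values userspace passes in
+ * next time; relocations whose target still sits at its presumed
+ * offset are skipped in i915_gem_object_pin_and_relocate().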
*/ + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + exec_list, + sizeof(*exec_list) * args->buffer_count); + if (ret) + DRM_ERROR("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); +err: + if (object_list != NULL) { + for (i = 0; i < pinned; i++) + i915_gem_object_unpin(object_list[i]); + + for (i = 0; i < args->buffer_count; i++) + drm_gem_object_unreference(object_list[i]); + } + mutex_unlock(&dev->struct_mutex); + +pre_mutex_err: + drm_free(object_list, sizeof(*object_list) * args->buffer_count, + DRM_MEM_DRIVER); + drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, + DRM_MEM_DRIVER); + + return ret; +} + +int +i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int ret; + + i915_verify_inactive(dev, __FILE__, __LINE__); + if (obj_priv->gtt_space == NULL) { + ret = i915_gem_object_bind_to_gtt(obj, alignment); + if (ret != 0) { + DRM_ERROR("Failure to bind: %d", ret); + return ret; + } + } + obj_priv->pin_count++; + + /* If the object is not active and not pending a flush, + * remove it from the inactive list + */ + if (obj_priv->pin_count == 1) { + atomic_inc(&dev->pin_count); + atomic_add(obj->size, &dev->pin_memory); + if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0 && + !list_empty(&obj_priv->list)) + list_del_init(&obj_priv->list); + } + i915_verify_inactive(dev, __FILE__, __LINE__); + + return 0; +} + +void +i915_gem_object_unpin(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + i915_verify_inactive(dev, __FILE__, __LINE__); + obj_priv->pin_count--; + BUG_ON(obj_priv->pin_count < 0); + BUG_ON(obj_priv->gtt_space == NULL); + + /* If the object is no longer pinned, and is + * neither active nor being flushed, then stick it on + * the inactive list + */ + if (obj_priv->pin_count == 0) { + if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0) + list_move_tail(&obj_priv->list, + &dev_priv->mm.inactive_list); + atomic_dec(&dev->pin_count); + atomic_sub(obj->size, &dev->pin_memory); + } + i915_verify_inactive(dev, __FILE__, __LINE__); +} + +int +i915_gem_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_pin *args = data; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret; + + mutex_lock(&dev->struct_mutex); + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", + args->handle); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + obj_priv = obj->driver_private; + + ret = i915_gem_object_pin(obj, args->alignment); + if (ret != 0) { + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } + + /** XXX - flush the CPU caches for pinned objects + * as the X server doesn't manage domains yet + */ + if (obj->write_domain & I915_GEM_DOMAIN_CPU) { + i915_gem_clflush_object(obj); + drm_agp_chipset_flush(dev); + obj->write_domain = 0; + } + args->offset = obj_priv->gtt_offset; + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +int +i915_gem_unpin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + 
struct drm_i915_gem_pin *args = data; + struct drm_gem_object *obj; + + mutex_lock(&dev->struct_mutex); + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", + args->handle); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + + i915_gem_object_unpin(obj); + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return 0; +} + +int +i915_gem_busy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_busy *args = data; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + mutex_lock(&dev->struct_mutex); + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", + args->handle); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + + obj_priv = obj->driver_private; + args->busy = obj_priv->active; + + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + return 0; +} + +int +i915_gem_throttle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + return i915_gem_ring_throttle(dev, file_priv); +} + +int i915_gem_init_object(struct drm_gem_object *obj) +{ + struct drm_i915_gem_object *obj_priv; + + obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); + if (obj_priv == NULL) + return -ENOMEM; + + /* + * We've just allocated pages from the kernel, + * so they've just been written by the CPU with + * zeros. They'll need to be clflushed before we + * use them with the GPU. + */ + obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->read_domains = I915_GEM_DOMAIN_CPU; + + obj->driver_private = obj_priv; + obj_priv->obj = obj; + INIT_LIST_HEAD(&obj_priv->list); + return 0; +} + +void i915_gem_free_object(struct drm_gem_object *obj) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + while (obj_priv->pin_count > 0) + i915_gem_object_unpin(obj); + + i915_gem_object_unbind(obj); + + drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); +} + +int +i915_gem_set_domain(struct drm_gem_object *obj, + struct drm_file *file_priv, + uint32_t read_domains, + uint32_t write_domain) +{ + struct drm_device *dev = obj->dev; + int ret; + uint32_t flush_domains; + + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + + ret = i915_gem_object_set_domain(obj, read_domains, write_domain); + if (ret) + return ret; + flush_domains = i915_gem_dev_set_domain(obj->dev); + + if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) + (void) i915_add_request(dev, flush_domains); + + return 0; +} + +/** Unbinds all objects that are on the given buffer list. */ +static int +i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) +{ + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret; + + while (!list_empty(head)) { + obj_priv = list_first_entry(head, + struct drm_i915_gem_object, + list); + obj = obj_priv->obj; + + if (obj_priv->pin_count != 0) { + DRM_ERROR("Pinned object in unbind list\n"); + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + + ret = i915_gem_object_unbind(obj); + if (ret != 0) { + DRM_ERROR("Error unbinding object in LeaveVT: %d\n", + ret); + mutex_unlock(&dev->struct_mutex); + return ret; + } + } + + + return 0; +} + +static int +i915_gem_idle(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t seqno, cur_seqno, last_seqno; + int stuck; + + if (dev_priv->mm.suspended) + return 0; + + /* Hack! 
Don't let anybody do execbuf while we don't control the chip. + * We need to replace this with a semaphore, or something. + */ + dev_priv->mm.suspended = 1; + + i915_kernel_lost_context(dev); + + /* Flush the GPU along with all non-CPU write domains + */ + i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), + ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); + seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); + + if (seqno == 0) { + mutex_unlock(&dev->struct_mutex); + return -ENOMEM; + } + + dev_priv->mm.waiting_gem_seqno = seqno; + last_seqno = 0; + stuck = 0; + for (;;) { + cur_seqno = i915_get_gem_seqno(dev); + if (i915_seqno_passed(cur_seqno, seqno)) + break; + if (last_seqno == cur_seqno) { + if (stuck++ > 100) { + DRM_ERROR("hardware wedged\n"); + dev_priv->mm.wedged = 1; + DRM_WAKEUP(&dev_priv->irq_queue); + break; + } + } + msleep(10); + last_seqno = cur_seqno; + } + dev_priv->mm.waiting_gem_seqno = 0; + + i915_gem_retire_requests(dev); + + /* Active and flushing should now be empty as we've + * waited for a sequence higher than any pending execbuffer + */ + BUG_ON(!list_empty(&dev_priv->mm.active_list)); + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); + + /* Request should now be empty as we've also waited + * for the last request in the list + */ + BUG_ON(!list_empty(&dev_priv->mm.request_list)); + + /* Move all buffers out of the GTT. */ + i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); + + BUG_ON(!list_empty(&dev_priv->mm.active_list)); + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); + BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); + BUG_ON(!list_empty(&dev_priv->mm.request_list)); + return 0; +} + +int +i915_gem_init_ringbuffer(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret; + + obj = drm_gem_object_alloc(dev, 128 * 1024); + if (obj == NULL) { + DRM_ERROR("Failed to allocate ringbuffer\n"); + return -ENOMEM; + } + obj_priv = obj->driver_private; + + ret = i915_gem_object_pin(obj, 4096); + if (ret != 0) { + drm_gem_object_unreference(obj); + return ret; + } + + /* Set up the kernel mapping for the ring. */ + dev_priv->ring.Size = obj->size; + dev_priv->ring.tail_mask = obj->size - 1; + + dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; + dev_priv->ring.map.size = obj->size; + dev_priv->ring.map.type = 0; + dev_priv->ring.map.flags = 0; + dev_priv->ring.map.mtrr = 0; + + drm_core_ioremap(&dev_priv->ring.map, dev); + if (dev_priv->ring.map.handle == NULL) { + DRM_ERROR("Failed to map ringbuffer.\n"); + memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); + drm_gem_object_unreference(obj); + return -EINVAL; + } + dev_priv->ring.ring_obj = obj; + dev_priv->ring.virtual_start = dev_priv->ring.map.handle; + + /* Stop the ring if it's running. */ + I915_WRITE(PRB0_CTL, 0); + I915_WRITE(PRB0_HEAD, 0); + I915_WRITE(PRB0_TAIL, 0); + I915_WRITE(PRB0_START, 0); + + /* Initialize the ring. 
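+ * PRB0_START gets the object's GTT offset; PRB0_CTL gets the ring
+ * length (size minus one page) together with the valid bit, as
+ * written just below.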
*/ + I915_WRITE(PRB0_START, obj_priv->gtt_offset); + I915_WRITE(PRB0_CTL, (((obj->size - 4096) & RING_NR_PAGES) | + RING_NO_REPORT | + RING_VALID)); + + /* Update our cache of the ring state */ + i915_kernel_lost_context(dev); + + return 0; +} + +void +i915_gem_cleanup_ringbuffer(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->ring.ring_obj == NULL) + return; + + drm_core_ioremapfree(&dev_priv->ring.map, dev); + + i915_gem_object_unpin(dev_priv->ring.ring_obj); + drm_gem_object_unreference(dev_priv->ring.ring_obj); + + memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); +} + +int +i915_gem_entervt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + if (dev_priv->mm.wedged) { + DRM_ERROR("Renabling wedged hardware, good luck\n"); + dev_priv->mm.wedged = 0; + } + + ret = i915_gem_init_ringbuffer(dev); + if (ret != 0) + return ret; + + mutex_lock(&dev->struct_mutex); + BUG_ON(!list_empty(&dev_priv->mm.active_list)); + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); + BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); + BUG_ON(!list_empty(&dev_priv->mm.request_list)); + dev_priv->mm.suspended = 0; + mutex_unlock(&dev->struct_mutex); + return 0; +} + +int +i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + int ret; + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_idle(dev); + if (ret == 0) + i915_gem_cleanup_ringbuffer(dev); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int i915_gem_active_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Active:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + DRM_PROC_PRINT(" %p: %08x %08x %d\n", + obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static int i915_gem_flushing_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Flushing:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static int i915_gem_inactive_info(char *buf, char 
**start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Inactive:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static int i915_gem_request_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_request *gem_request; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Request:\n"); + list_for_each_entry(gem_request, &dev_priv->mm.request_list, + list) + { + DRM_PROC_PRINT (" %d @ %d %08x\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies), + gem_request->flush_domains); + } + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static int i915_gem_seqno_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); + DRM_PROC_PRINT("Waiter sequence: %d\n", dev_priv->mm.waiting_gem_seqno); + DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + + +static int i915_interrupt_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Interrupt enable: %08x\n", + I915_READ(IER)); + DRM_PROC_PRINT("Interrupt identity: %08x\n", + I915_READ(IIR)); + DRM_PROC_PRINT("Interrupt mask: %08x\n", + I915_READ(IMR)); + DRM_PROC_PRINT("Pipe A stat: %08x\n", + I915_READ(PIPEASTAT)); + DRM_PROC_PRINT("Pipe B stat: %08x\n", + I915_READ(PIPEBSTAT)); + DRM_PROC_PRINT("Interrupts received: %d\n", + atomic_read(&dev_priv->irq_received)); + DRM_PROC_PRINT("Current sequence: %d\n", + i915_get_gem_seqno(dev)); + DRM_PROC_PRINT("Waiter sequence: %d\n", + dev_priv->mm.waiting_gem_seqno); + DRM_PROC_PRINT("IRQ sequence: %d\n", + dev_priv->mm.irq_gem_seqno); + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static struct drm_proc_list { + const char *name; /**< file name */ + int (*f) (char *, char **, off_t, int, int 
*, void *); /**< proc callback*/ +} i915_gem_proc_list[] = { + {"i915_gem_active", i915_gem_active_info}, + {"i915_gem_flushing", i915_gem_flushing_info}, + {"i915_gem_inactive", i915_gem_inactive_info}, + {"i915_gem_request", i915_gem_request_info}, + {"i915_gem_seqno", i915_gem_seqno_info}, + {"i915_gem_interrupt", i915_interrupt_info}, +}; + +#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) + +int i915_gem_proc_init(struct drm_minor *minor) +{ + struct proc_dir_entry *ent; + int i, j; + + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { + ent = create_proc_entry(i915_gem_proc_list[i].name, + S_IFREG | S_IRUGO, minor->dev_root); + if (!ent) { + DRM_ERROR("Cannot create /proc/dri/.../%s\n", + i915_gem_proc_list[i].name); + for (j = 0; j < i; j++) + remove_proc_entry(i915_gem_proc_list[i].name, + minor->dev_root); + return -1; + } + ent->read_proc = i915_gem_proc_list[i].f; + ent->data = minor; + } + return 0; +} + +void i915_gem_proc_cleanup(struct drm_minor *minor) +{ + int i; + + if (!minor->dev_root) + return; + + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) + remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); +} + +void +i915_gem_lastclose(struct drm_device *dev) +{ + int ret; + struct drm_i915_private *dev_priv = dev->dev_private; + + mutex_lock(&dev->struct_mutex); + + if (dev_priv->ring.ring_obj != NULL) { + ret = i915_gem_idle(dev); + if (ret) + DRM_ERROR("failed to idle hardware: %d\n", ret); + + i915_gem_cleanup_ringbuffer(dev); + } + + mutex_unlock(&dev->struct_mutex); +} diff --git a/linux-core/intel_crt.c b/linux-core/intel_crt.c index 8e1b833a..2505d983 100644 --- a/linux-core/intel_crt.c +++ b/linux-core/intel_crt.c @@ -136,8 +136,8 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, * * Not for i915G/i915GM * - * \return TRUE if CRT is connected. - * \return FALSE if CRT is disconnected. + * \return true if CRT is connected. + * \return false if CRT is disconnected. */ static bool intel_crt_detect_hotplug(struct drm_connector *connector) { diff --git a/linux-core/intel_display.c b/linux-core/intel_display.c index 29aae169..a761b61b 100644 --- a/linux-core/intel_display.c +++ b/linux-core/intel_display.c @@ -369,6 +369,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y) struct drm_i915_master_private *master_priv; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj_priv; int pipe = intel_crtc->pipe; unsigned long Start, Offset; int dspbase = (pipe == 0 ? 
DSPAADDR : DSPBADDR); @@ -385,7 +386,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y) intel_fb = to_intel_framebuffer(crtc->fb); - Start = intel_fb->bo->offset; + obj_priv = intel_fb->obj->driver_private; + + Start = obj_priv->gtt_offset; Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); I915_WRITE(dspstride, crtc->fb->pitch); @@ -507,7 +510,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) intel_crtc_load_lut(crtc); /* Give the overlay scaler a chance to enable if it's on this pipe */ - //intel_crtc_dpms_video(crtc, TRUE); TODO + //intel_crtc_dpms_video(crtc, true); TODO break; case DRM_MODE_DPMS_OFF: /* Give the overlay scaler a chance to disable if it's on this pipe */ @@ -734,19 +737,19 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, switch (intel_output->type) { case INTEL_OUTPUT_LVDS: - is_lvds = TRUE; + is_lvds = true; break; case INTEL_OUTPUT_SDVO: - is_sdvo = TRUE; + is_sdvo = true; break; case INTEL_OUTPUT_DVO: - is_dvo = TRUE; + is_dvo = true; break; case INTEL_OUTPUT_TVOUT: - is_tv = TRUE; + is_tv = true; break; case INTEL_OUTPUT_ANALOG: - is_crt = TRUE; + is_crt = true; break; } } @@ -1174,7 +1177,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, } encoder->crtc = crtc; - intel_output->load_detect_temp = TRUE; + intel_output->load_detect_temp = true; intel_crtc = to_intel_crtc(crtc); *dpms_mode = intel_crtc->dpms_mode; @@ -1209,7 +1212,7 @@ void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_ if (intel_output->load_detect_temp) { encoder->crtc = NULL; - intel_output->load_detect_temp = FALSE; + intel_output->load_detect_temp = false; crtc->enabled = drm_helper_crtc_in_use(crtc); drm_helper_disable_unused_functions(dev); } @@ -1493,7 +1496,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { }; struct drm_framebuffer *intel_user_framebuffer_create(struct drm_device *dev, - struct drm_file *file_priv, + struct drm_file *filp, struct drm_mode_fb_cmd *mode_cmd) { struct intel_framebuffer *intel_fb; @@ -1505,15 +1508,15 @@ struct drm_framebuffer *intel_user_framebuffer_create(struct drm_device *dev, drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); - if (file_priv) { - mutex_lock(&dev->struct_mutex); - intel_fb->bo = drm_lookup_buffer_object(file_priv, intel_fb->base.mm_handle, 0); - mutex_unlock(&dev->struct_mutex); - if (!intel_fb->bo) { + if (filp) { + intel_fb->obj = drm_gem_object_lookup(dev, filp, + mode_cmd->handle); + if (!intel_fb->obj) { kfree(intel_fb); return NULL; } } + drm_gem_object_unreference(intel_fb->obj); return &intel_fb->base; } @@ -1521,22 +1524,25 @@ static int intel_insert_new_fb(struct drm_device *dev, struct drm_file *file_pri struct drm_framebuffer *fb, struct drm_mode_fb_cmd *mode_cmd) { struct intel_framebuffer *intel_fb; - struct drm_buffer_object *bo; + struct drm_gem_object *obj; struct drm_crtc *crtc; intel_fb = to_intel_framebuffer(fb); mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(file_priv, mode_cmd->handle, 0); - mutex_unlock(&dev->struct_mutex); + obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle); - if (!bo) + if (!obj) { + mutex_unlock(&dev->struct_mutex); return -EINVAL; + } drm_helper_mode_fill_fb_struct(fb, mode_cmd); - - drm_bo_usage_deref_unlocked(&intel_fb->bo); - intel_fb->bo = bo; + drm_gem_object_unreference(intel_fb->obj); + drm_gem_object_unreference(obj); + mutex_unlock(&dev->struct_mutex); + + 
intel_fb->obj = obj; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb == fb) { diff --git a/linux-core/intel_drv.h b/linux-core/intel_drv.h index 1008e271..bffbeef0 100644 --- a/linux-core/intel_drv.h +++ b/linux-core/intel_drv.h @@ -50,8 +50,7 @@ struct intel_i2c_chan { struct intel_framebuffer { struct drm_framebuffer base; - struct drm_buffer_object *bo; - struct drm_bo_kmap_obj kmap; + struct drm_gem_object *obj; }; diff --git a/linux-core/intel_fb.c b/linux-core/intel_fb.c index 5637ea2f..bc056bc3 100644 --- a/linux-core/intel_fb.c +++ b/linux-core/intel_fb.c @@ -525,6 +525,92 @@ static int intelfb_pan_display(struct fb_var_screeninfo *var, return ret; } +static void intelfb_on(struct fb_info *info) +{ + struct intelfb_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + int i; + + /* + * For each CRTC in this fb, find all associated encoders + * and turn them off, then turn off the CRTC. + */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + for (i = 0; i < par->crtc_count; i++) + if (crtc->base.id == par->crtc_ids[i]) + break; + + crtc_funcs->dpms(crtc, DPMSModeOn); + + /* Found a CRTC on this fb, now find encoders */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + struct drm_encoder_helper_funcs *encoder_funcs; + encoder_funcs = encoder->helper_private; + encoder_funcs->dpms(encoder, DPMSModeOn); + } + } + } +} + +static void intelfb_off(struct fb_info *info, int dpms_mode) +{ + struct intelfb_par *par = info->par; + struct drm_device *dev = par->dev; + struct drm_crtc *crtc; + struct drm_encoder *encoder; + int i; + + /* + * For each CRTC in this fb, find all associated encoders + * and turn them off, then turn off the CRTC. 
+ */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + + for (i = 0; i < par->crtc_count; i++) + if (crtc->base.id == par->crtc_ids[i]) + break; + + /* Found a CRTC on this fb, now find encoders */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + struct drm_encoder_helper_funcs *encoder_funcs; + encoder_funcs = encoder->helper_private; + encoder_funcs->dpms(encoder, dpms_mode); + } + } + if (dpms_mode == DPMSModeOff) + crtc_funcs->dpms(crtc, dpms_mode); + } +} + +int intelfb_blank(int blank, struct fb_info *info) +{ + switch (blank) { + case FB_BLANK_UNBLANK: + intelfb_on(info); + break; + case FB_BLANK_NORMAL: + intelfb_off(info, DPMSModeStandby); + break; + case FB_BLANK_HSYNC_SUSPEND: + intelfb_off(info, DPMSModeStandby); + break; + case FB_BLANK_VSYNC_SUSPEND: + intelfb_off(info, DPMSModeSuspend); + break; + case FB_BLANK_POWERDOWN: + intelfb_off(info, DPMSModeOff); + break; + } + return 0; +} + static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, //.fb_open = intelfb_open, @@ -539,6 +625,7 @@ static struct fb_ops intelfb_ops = { .fb_copyarea = cfb_copyarea, //intelfb_copyarea, .fb_imageblit = cfb_imageblit, //intelfb_imageblit, .fb_pan_display = intelfb_pan_display, + .fb_blank = intelfb_blank, }; /** @@ -606,9 +693,10 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; struct drm_mode_fb_cmd mode_cmd; - struct drm_buffer_object *fbo = NULL; + struct drm_gem_object *fbo = NULL; + struct drm_i915_gem_object *obj_priv; struct device *device = &dev->pdev->dev; - int ret; + int size, aligned_size, ret; mode_cmd.width = surface_width;/* crtc->desired_mode->hdisplay; */ mode_cmd.height = surface_height;/* crtc->desired_mode->vdisplay; */ @@ -617,26 +705,28 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height mode_cmd.pitch = mode_cmd.width * ((mode_cmd.bpp + 1) / 8); mode_cmd.depth = 24; - ret = drm_buffer_object_create(dev, mode_cmd.pitch * mode_cmd.height, - drm_bo_type_kernel, - DRM_BO_FLAG_READ | - DRM_BO_FLAG_WRITE | - DRM_BO_FLAG_MEM_TT | - DRM_BO_FLAG_MEM_VRAM | - DRM_BO_FLAG_NO_EVICT, - DRM_BO_HINT_DONT_FENCE, 0, 0, - &fbo); - if (ret || !fbo) { + size = mode_cmd.pitch * mode_cmd.height; + aligned_size = ALIGN(size, PAGE_SIZE); + fbo = drm_gem_object_alloc(dev, aligned_size); + if (!fbo) { printk(KERN_ERR "failed to allocate framebuffer\n"); - return -EINVAL; + ret = -ENOMEM; + goto out; + } + obj_priv = fbo->driver_private; + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_object_pin(fbo, PAGE_SIZE); + if (ret) { + DRM_ERROR("failed to pin fb: %d\n", ret); + goto out_unref; } - fb = intel_user_framebuffer_create(dev, NULL, &mode_cmd); if (!fb) { - drm_bo_usage_deref_unlocked(&fbo); DRM_ERROR("failed to allocate fb.\n"); - return -EINVAL; + ret = -ENOMEM; + goto out_unref; } list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); @@ -644,11 +734,13 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height intel_fb = to_intel_framebuffer(fb); *intel_fb_p = intel_fb; - intel_fb->bo = fbo; + intel_fb->obj = fbo; info = framebuffer_alloc(sizeof(struct intelfb_par), device); - if (!info) - return -EINVAL; + if (!info) { + ret = -ENOMEM; + goto out_unref; + } par = info->par; @@ -667,19 +759,20 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height info->fbops 
= &intelfb_ops; info->fix.line_length = fb->pitch; - info->fix.smem_start = intel_fb->bo->offset + dev->mode_config.fb_base; - info->fix.smem_len = info->fix.line_length * fb->height; + info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; + info->fix.smem_len = size; info->flags = FBINFO_DEFAULT; - ret = drm_bo_kmap(intel_fb->bo, 0, intel_fb->bo->num_pages, &intel_fb->kmap); - if (ret) - DRM_ERROR("error mapping fb: %d\n", ret); - - info->screen_base = intel_fb->kmap.virtual; - info->screen_size = info->fix.smem_len; /* FIXME */ + info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, + size); + if (!info->screen_base) { + ret = -ENOSPC; + goto out_unref; + } + info->screen_size = size; - memset(intel_fb->kmap.virtual, 0, info->screen_size); + memset(info->screen_base, 0, size); info->pseudo_palette = fb->pseudo_palette; info->var.xres_virtual = fb->width; @@ -770,10 +863,17 @@ int intelfb_create(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height par->dev = dev; /* To allow resizeing without swapping buffers */ - printk("allocated %dx%d fb: 0x%08lx, bo %p\n", intel_fb->base.width, - intel_fb->base.height, intel_fb->bo->offset, fbo); + printk("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, + intel_fb->base.height, obj_priv->gtt_offset, fbo); + mutex_unlock(&dev->struct_mutex); return 0; + +out_unref: + drm_gem_object_unreference(fbo); + mutex_unlock(&dev->struct_mutex); +out: + return ret; } static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *crtc) @@ -1029,8 +1129,10 @@ int intelfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) if (info) { unregister_framebuffer(info); - drm_bo_kunmap(&intel_fb->kmap); - drm_bo_usage_deref_unlocked(&intel_fb->bo); + iounmap(info->screen_base); + mutex_lock(&dev->struct_mutex); + drm_gem_object_unreference(intel_fb->obj); + mutex_unlock(&dev->struct_mutex); framebuffer_release(info); } diff --git a/linux-core/intel_lvds.c b/linux-core/intel_lvds.c index 06b99867..fa8209c7 100644 --- a/linux-core/intel_lvds.c +++ b/linux-core/intel_lvds.c @@ -407,8 +407,8 @@ void intel_lvds_init(struct drm_device *dev) drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; - connector->interlace_allowed = FALSE; - connector->doublescan_allowed = FALSE; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; /* diff --git a/linux-core/intel_tv.c b/linux-core/intel_tv.c index 29cfc031..3efa9d2d 100644 --- a/linux-core/intel_tv.c +++ b/linux-core/intel_tv.c @@ -422,18 +422,18 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, - .progressive = FALSE, .trilevel_sync = FALSE, + .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, - .veq_ena = TRUE, .veq_start_f1 = 0, + .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, - .burst_ena = TRUE, + .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, @@ -445,7 +445,7 @@ const static struct tv_mode tv_modes[] = { .dda2_inc = 7624, .dda2_size = 20013, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, - .pal_burst = FALSE, + .pal_burst = false, .composite_levels = &ntsc_m_levels_composite, 
.composite_color = &ntsc_m_csc_composite, @@ -464,12 +464,12 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, - .progressive = FALSE, .trilevel_sync = FALSE, + .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, - .veq_ena = TRUE, .veq_start_f1 = 0, + .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, @@ -487,7 +487,7 @@ const static struct tv_mode tv_modes[] = { .dda2_inc = 18557, .dda2_size = 20625, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_8, - .pal_burst = TRUE, + .pal_burst = true, .composite_levels = &ntsc_m_levels_composite, .composite_color = &ntsc_m_csc_composite, @@ -507,18 +507,18 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, - .progressive = FALSE, .trilevel_sync = FALSE, + .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, - .veq_ena = TRUE, .veq_start_f1 = 0, + .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, - .burst_ena = TRUE, + .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, @@ -530,7 +530,7 @@ const static struct tv_mode tv_modes[] = { .dda2_inc = 7624, .dda2_size = 20013, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, - .pal_burst = FALSE, + .pal_burst = false, .composite_levels = &ntsc_j_levels_composite, .composite_color = &ntsc_j_csc_composite, @@ -550,18 +550,18 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 64, .hblank_end = 124, .hblank_start = 836, .htotal = 857, - .progressive = FALSE, .trilevel_sync = FALSE, + .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, - .veq_ena = TRUE, .veq_start_f1 = 0, + .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, - .burst_ena = TRUE, + .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, @@ -573,7 +573,7 @@ const static struct tv_mode tv_modes[] = { .dda2_inc = 7624, .dda2_size = 20013, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_4, - .pal_burst = FALSE, + .pal_burst = false, .composite_levels = &pal_m_levels_composite, .composite_color = &pal_m_csc_composite, @@ -593,19 +593,19 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 64, .hblank_end = 128, .hblank_start = 844, .htotal = 863, - .progressive = FALSE, .trilevel_sync = FALSE, + .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 6, .vsync_start_f2 = 7, .vsync_len = 6, - .veq_ena = TRUE, .veq_start_f1 = 0, + .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 18, .vi_end_f1 = 24, .vi_end_f2 = 25, .nbr_end = 286, - .burst_ena = TRUE, + .burst_ena = true, .hburst_start = 73, .hburst_len = 34, .vburst_start_f1 = 8, .vburst_end_f1 = 285, .vburst_start_f2 = 8, .vburst_end_f2 = 286, @@ -618,7 +618,7 @@ const static struct tv_mode tv_modes[] = { .dda2_inc = 18557, .dda2_size = 20625, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_8, - .pal_burst = TRUE, + .pal_burst = true, .composite_levels = &pal_n_levels_composite, .composite_color = &pal_n_csc_composite, @@ -638,18 +638,18 @@ const static 
struct tv_mode tv_modes[] = { .hsync_end = 64, .hblank_end = 128, .hblank_start = 844, .htotal = 863, - .progressive = FALSE, .trilevel_sync = FALSE, + .progressive = false, .trilevel_sync = false, .vsync_start_f1 = 5, .vsync_start_f2 = 6, .vsync_len = 5, - .veq_ena = TRUE, .veq_start_f1 = 0, + .veq_ena = true, .veq_start_f1 = 0, .veq_start_f2 = 1, .veq_len = 15, .vi_end_f1 = 24, .vi_end_f2 = 25, .nbr_end = 286, - .burst_ena = TRUE, + .burst_ena = true, .hburst_start = 73, .hburst_len = 32, .vburst_start_f1 = 8, .vburst_end_f1 = 285, .vburst_start_f2 = 8, .vburst_end_f2 = 286, @@ -661,7 +661,7 @@ const static struct tv_mode tv_modes[] = { .dda2_inc = 18557, .dda2_size = 20625, .dda3_inc = 0, .dda3_size = 0, .sc_reset = TV_SC_RESET_EVERY_8, - .pal_burst = TRUE, + .pal_burst = true, .composite_levels = &pal_levels_composite, .composite_color = &pal_csc_composite, @@ -680,17 +680,17 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 64, .hblank_end = 122, .hblank_start = 842, .htotal = 857, - .progressive = TRUE,.trilevel_sync = FALSE, + .progressive = true,.trilevel_sync = false, .vsync_start_f1 = 12, .vsync_start_f2 = 12, .vsync_len = 12, - .veq_ena = FALSE, + .veq_ena = false, .vi_end_f1 = 44, .vi_end_f2 = 44, .nbr_end = 496, - .burst_ena = FALSE, + .burst_ena = false, .filter_table = filter_table, }, @@ -704,17 +704,17 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 64, .hblank_end = 122, .hblank_start = 842, .htotal = 856, - .progressive = TRUE,.trilevel_sync = FALSE, + .progressive = true,.trilevel_sync = false, .vsync_start_f1 = 12, .vsync_start_f2 = 12, .vsync_len = 12, - .veq_ena = FALSE, + .veq_ena = false, .vi_end_f1 = 44, .vi_end_f2 = 44, .nbr_end = 496, - .burst_ena = FALSE, + .burst_ena = false, .filter_table = filter_table, }, @@ -728,17 +728,17 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 64, .hblank_end = 139, .hblank_start = 859, .htotal = 863, - .progressive = TRUE, .trilevel_sync = FALSE, + .progressive = true, .trilevel_sync = false, .vsync_start_f1 = 10, .vsync_start_f2 = 10, .vsync_len = 10, - .veq_ena = FALSE, + .veq_ena = false, .vi_end_f1 = 48, .vi_end_f2 = 48, .nbr_end = 575, - .burst_ena = FALSE, + .burst_ena = false, .filter_table = filter_table, }, @@ -752,17 +752,17 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 80, .hblank_end = 300, .hblank_start = 1580, .htotal = 1649, - .progressive = TRUE, .trilevel_sync = TRUE, + .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 10, .vsync_start_f2 = 10, .vsync_len = 10, - .veq_ena = FALSE, + .veq_ena = false, .vi_end_f1 = 29, .vi_end_f2 = 29, .nbr_end = 719, - .burst_ena = FALSE, + .burst_ena = false, .filter_table = filter_table, }, @@ -776,17 +776,17 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 80, .hblank_end = 300, .hblank_start = 1580, .htotal = 1651, - .progressive = TRUE, .trilevel_sync = TRUE, + .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 10, .vsync_start_f2 = 10, .vsync_len = 10, - .veq_ena = FALSE, + .veq_ena = false, .vi_end_f1 = 29, .vi_end_f2 = 29, .nbr_end = 719, - .burst_ena = FALSE, + .burst_ena = false, .filter_table = filter_table, }, @@ -800,17 +800,17 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 80, .hblank_end = 300, .hblank_start = 1580, .htotal = 1979, - .progressive = TRUE, .trilevel_sync = TRUE, + .progressive = true, .trilevel_sync = true, .vsync_start_f1 = 10, .vsync_start_f2 = 10, .vsync_len = 10, - .veq_ena = FALSE, + .veq_ena = false, .vi_end_f1 = 29, .vi_end_f2 = 29, .nbr_end = 719, - 
.burst_ena = FALSE, + .burst_ena = false, .filter_table = filter_table, .max_srcw = 800 @@ -825,19 +825,19 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2639, - .progressive = FALSE, .trilevel_sync = TRUE, + .progressive = false, .trilevel_sync = true, .vsync_start_f1 = 4, .vsync_start_f2 = 5, .vsync_len = 10, - .veq_ena = TRUE, .veq_start_f1 = 4, + .veq_ena = true, .veq_start_f1 = 4, .veq_start_f2 = 4, .veq_len = 10, .vi_end_f1 = 21, .vi_end_f2 = 22, .nbr_end = 539, - .burst_ena = FALSE, + .burst_ena = false, .filter_table = filter_table, }, @@ -851,19 +851,19 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2199, - .progressive = FALSE, .trilevel_sync = TRUE, + .progressive = false, .trilevel_sync = true, .vsync_start_f1 = 4, .vsync_start_f2 = 5, .vsync_len = 10, - .veq_ena = TRUE, .veq_start_f1 = 4, + .veq_ena = true, .veq_start_f1 = 4, .veq_start_f2 = 4, .veq_len = 10, .vi_end_f1 = 21, .vi_end_f2 = 22, .nbr_end = 539, - .burst_ena = FALSE, + .burst_ena = false, .filter_table = filter_table, }, @@ -877,19 +877,19 @@ const static struct tv_mode tv_modes[] = { .hsync_end = 88, .hblank_end = 235, .hblank_start = 2155, .htotal = 2200, - .progressive = FALSE, .trilevel_sync = TRUE, + .progressive = false, .trilevel_sync = true, .vsync_start_f1 = 4, .vsync_start_f2 = 5, .vsync_len = 10, - .veq_ena = TRUE, .veq_start_f1 = 4, + .veq_ena = true, .veq_start_f1 = 4, .veq_start_f2 = 4, .veq_len = 10, .vi_end_f1 = 21, .vi_end_f2 = 22, .nbr_end = 539, - .burst_ena = FALSE, + .burst_ena = false, .filter_table = filter_table, }, @@ -1098,17 +1098,17 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_encoder *other_encoder; if (!tv_mode) - return FALSE; + return false; /* FIXME: lock encoder list */ list_for_each_entry(other_encoder, &drm_config->encoder_list, head) { if (other_encoder != encoder && other_encoder->crtc == encoder->crtc) - return FALSE; + return false; } adjusted_mode->clock = tv_mode->clock; - return TRUE; + return true; } static void @@ -1152,7 +1152,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, color_conversion = &sdtv_csc_yprpb; else color_conversion = &hdtv_csc_yprpb; - burst_ena = FALSE; + burst_ena = false; break; case DRM_MODE_CONNECTOR_SVIDEO: tv_ctl |= TV_ENC_OUTPUT_SVIDEO; @@ -1352,8 +1352,8 @@ static const struct drm_display_mode reported_modes[] = { * * Requires that the current pipe's DPLL is active. - * \return TRUE if TV is connected. - * \return FALSE if TV is disconnected. + * \return true if TV is connected. + * \return false if TV is disconnected. 
*/ static int intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) @@ -1703,8 +1703,8 @@ intel_tv_init(struct drm_device *dev) drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); - connector->interlace_allowed = FALSE; - connector->doublescan_allowed = FALSE; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; /* Create TV properties then attach current values */ tv_format_names = drm_alloc(sizeof(char *) * NUM_TV_MODES, diff --git a/linux-core/nouveau_bo.c b/linux-core/nouveau_bo.c index ab3b23a4..86347e03 100644 --- a/linux-core/nouveau_bo.c +++ b/linux-core/nouveau_bo.c @@ -229,7 +229,7 @@ out_cleanup: if (tmp_mem.mm_node) { mutex_lock(&dev->struct_mutex); if (tmp_mem.mm_node != bo->pinned_node) - drm_mm_put_block(tmp_mem.mm_node); + drm_memrange_put_block(tmp_mem.mm_node); tmp_mem.mm_node = NULL; mutex_unlock(&dev->struct_mutex); } diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index cc4d5a92..81704ea1 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -280,7 +280,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_ttm_backend *be; struct drm_scatter_gather sgreq; - struct drm_mm_node mm_node; + struct drm_memrange_node mm_node; struct drm_bo_mem_reg mem; int ret; diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 64401ae5..b31ac2d4 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -135,7 +135,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, DRM_DEBUG("info->cmdring.last_ptr != NULL\n"); if (pCmdInfo->type == BTYPE_3D) { - xgi_emit_flush(info, FALSE); + xgi_emit_flush(info, false); } info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]); @@ -214,7 +214,7 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) * list chain with a flush command. 
*/ if (info->cmdring.last_ptr != NULL) { - xgi_emit_flush(info, FALSE); + xgi_emit_flush(info, false); xgi_emit_nop(info); } @@ -322,5 +322,5 @@ void xgi_emit_irq(struct xgi_info * info) if (info->cmdring.last_ptr == NULL) return; - xgi_emit_flush(info, TRUE); + xgi_emit_flush(info, true); } diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index f0225f89..f8ed7de8 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -307,8 +307,8 @@ void xgi_driver_lastclose(struct drm_device * dev) || info->pcie_heap_initialized) { drm_sman_cleanup(&info->sman); - info->fb_heap_initialized = FALSE; - info->pcie_heap_initialized = FALSE; + info->fb_heap_initialized = false; + info->pcie_heap_initialized = false; } } } diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 2b3a1788..2a9632f6 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -46,41 +46,41 @@ static bool xgi_validate_signal(struct drm_map * map) check = le16_to_cpu(DRM_READ16(map, 0x2360)); if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { - return FALSE; + return false; } /* Check RO channel */ DRM_WRITE8(map, 0x235c, 0x83); check = le16_to_cpu(DRM_READ16(map, 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { - return FALSE; + return false; } /* Check RW channel */ DRM_WRITE8(map, 0x235c, 0x88); check = le16_to_cpu(DRM_READ16(map, 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { - return FALSE; + return false; } /* Check RO channel outstanding */ DRM_WRITE8(map, 0x235c, 0x8f); check = le16_to_cpu(DRM_READ16(map, 0x2360)); if (0 != (check & 0x3ff)) { - return FALSE; + return false; } /* Check RW channel outstanding */ DRM_WRITE8(map, 0x235c, 0x90); check = le16_to_cpu(DRM_READ16(map, 0x2360)); if (0 != (check & 0x3ff)) { - return FALSE; + return false; } /* No pending PCIE request. GE stall. */ } - return TRUE; + return true; } @@ -138,7 +138,7 @@ static void xgi_ge_hang_reset(struct drm_map * map) bool xgi_ge_irq_handler(struct xgi_info * info) { const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810)); - bool is_support_auto_reset = FALSE; + bool is_support_auto_reset = false; /* Check GE on/off */ if (0 == (0xffffc0f0 & int_status)) { @@ -179,15 +179,15 @@ bool xgi_ge_irq_handler(struct xgi_info * info) cpu_to_le32((int_status & ~0x01) | 0x04000000)); } - return TRUE; + return true; } - return FALSE; + return false; } bool xgi_crt_irq_handler(struct xgi_info * info) { - bool ret = FALSE; + bool ret = false; u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); /* CRT1 interrupt just happened @@ -205,7 +205,7 @@ bool xgi_crt_irq_handler(struct xgi_info * info) op3cf_3d = IN3CFB(info->mmio_map, 0x3d); OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04)); OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04)); - ret = TRUE; + ret = true; } DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce); @@ -214,7 +214,7 @@ bool xgi_crt_irq_handler(struct xgi_info * info) bool xgi_dvi_irq_handler(struct xgi_info * info) { - bool ret = FALSE; + bool ret = false; const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); /* DVI interrupt just happened @@ -242,7 +242,7 @@ bool xgi_dvi_irq_handler(struct xgi_info * info) OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01)); OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01)); - ret = TRUE; + ret = true; } DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce); |
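
Note on the intel_fb.c hunks above: the fbdev console is switched from a TTM buffer
object to a GEM object. The object is allocated with drm_gem_object_alloc(), pinned
into the GTT with i915_gem_object_pin(), and the resulting aperture range is mapped
write-combined with ioremap_wc() in place of drm_bo_kmap(); on failure the reference
is dropped under dev->struct_mutex. A minimal sketch of that pattern, assuming the
helper signatures used in the diff (the example_* wrapper is illustrative only and
not part of the driver):

	/* Illustrative helper only (example_*): mirrors the allocate/pin/map
	 * sequence that intelfb_create() uses in the hunks above. */
	static int example_gem_fb_map(struct drm_device *dev, struct fb_info *info,
				      int pitch, int height)
	{
		struct drm_gem_object *fbo;
		struct drm_i915_gem_object *obj_priv;
		int size, ret;

		size = ALIGN(pitch * height, PAGE_SIZE);

		fbo = drm_gem_object_alloc(dev, size);
		if (!fbo)
			return -ENOMEM;
		obj_priv = fbo->driver_private;

		mutex_lock(&dev->struct_mutex);

		/* Bind the object into the GTT so it has a fixed aperture offset. */
		ret = i915_gem_object_pin(fbo, PAGE_SIZE);
		if (ret)
			goto out_unref;

		/* Map the pinned range through the AGP aperture, write-combined. */
		info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
					       size);
		if (!info->screen_base) {
			ret = -ENOSPC;
			goto out_unref;
		}
		info->screen_size = size;

		/* Physical scanout address reported to the fb layer. */
		info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
		info->fix.smem_len = size;

		mutex_unlock(&dev->struct_mutex);
		return 0;

	out_unref:
		drm_gem_object_unreference(fbo);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

Teardown reverses the same steps, as in the intelfb_remove() hunk: iounmap() the
mapping, then drm_gem_object_unreference() under dev->struct_mutex before
framebuffer_release().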
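
Most of the remaining churn in intel_lvds.c, intel_tv.c and the xgi_*.c files is
mechanical: the DRM-private TRUE/FALSE macros are replaced by the kernel's C99 bool
values true/false (from <linux/types.h>), both in the tv_mode designated initializers
and in functions that already return bool. A small illustration of the resulting
style; the field names follow the tv_mode entries above, but this type is only an
example, not the driver's:

	#include <linux/types.h>	/* bool, true, false */

	struct example_tv_flags {	/* illustrative only */
		bool progressive;
		bool trilevel_sync;
		bool veq_ena;
		bool burst_ena;
		bool pal_burst;
	};

	static const struct example_tv_flags ntsc_m_flags = {
		.progressive	= false,
		.trilevel_sync	= false,
		.veq_ena	= true,
		.burst_ena	= true,
		.pal_burst	= false,
	};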
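
The nouveau hunks track the core rename of the drm_mm range allocator to drm_memrange
(drm_mm.o becomes drm_memrange.o in Makefile.kernel): struct drm_mm_node is now
struct drm_memrange_node and drm_mm_put_block() is now drm_memrange_put_block(), with
no change in behaviour. A hedged sketch of a cleanup path after the rename, modelled
on the nouveau_bo.c hunk above (the example_* wrapper and surrounding move logic are
illustrative, not driver code):

	/* Release a temporary memory range back to the allocator unless it is
	 * the buffer's pinned node; same logic as the nouveau_bo.c hunk above,
	 * spelled with the renamed drm_memrange API. */
	static void example_put_tmp_range(struct drm_device *dev,
					  struct drm_buffer_object *bo,
					  struct drm_bo_mem_reg *tmp_mem)
	{
		if (!tmp_mem->mm_node)
			return;

		mutex_lock(&dev->struct_mutex);
		if (tmp_mem->mm_node != bo->pinned_node)
			drm_memrange_put_block(tmp_mem->mm_node);
		tmp_mem->mm_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}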