-rw-r--r--   linux-core/Kconfig              15
-rw-r--r--   linux-core/Makefile              1
-rw-r--r--   linux-core/i915_buffer.c       303
-rw-r--r--   linux-core/i915_compat.c       215
l---------   linux-core/i915_dma.c            1
l---------   linux-core/i915_drm.h            1
-rw-r--r--   linux-core/i915_drv.c          222
l---------   linux-core/i915_drv.h            1
-rw-r--r--   linux-core/i915_execbuf.c      917
-rw-r--r--   linux-core/i915_fence.c        273
-rw-r--r--   linux-core/i915_gem.c         2502
-rw-r--r--   linux-core/i915_gem_debug.c    202
-rw-r--r--   linux-core/i915_gem_proc.c     293
-rw-r--r--   linux-core/i915_gem_tiling.c   309
-rw-r--r--   linux-core/i915_ioc32.c        284
l---------   linux-core/i915_irq.c            1
l---------   linux-core/i915_mem.c            1
-rw-r--r--   linux-core/i915_opregion.c     389
l---------   linux-core/i915_suspend.c        1
19 files changed, 0 insertions, 5931 deletions
diff --git a/linux-core/Kconfig b/linux-core/Kconfig
index 2d02c765..df8dd5c0 100644
--- a/linux-core/Kconfig
+++ b/linux-core/Kconfig
@@ -48,21 +48,6 @@ config DRM_I810
selected, the module will be called i810. AGP support is required
for this driver to work.
-choice
- prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
- depends on DRM && AGP && AGP_INTEL
- optional
-
-config DRM_I915
- tristate "i915 driver"
- help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM, 865G, 915G, 915GM, 945G, 945GM and 965G integrated
- graphics. If M is selected, the module will be called i915.
- AGP support is required for this driver to work.
-
-endchoice
-
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && (!X86_64 || BROKEN) && (!PPC || BROKEN)
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 887418c7..9257a49b 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -83,7 +83,6 @@ R128HEADERS = r128_drv.h r128_drm.h $(DRMHEADERS)
RADEONHEADERS = radeon_drv.h radeon_drm.h r300_reg.h $(DRMHEADERS)
MGAHEADERS = mga_drv.h mga_drm.h mga_ucode.h $(DRMHEADERS)
I810HEADERS = i810_drv.h i810_drm.h $(DRMHEADERS)
-I915HEADERS = i915_drv.h i915_drm.h $(DRMHEADERS)
SISHEADERS= sis_drv.h sis_drm.h drm_hashtab.h drm_sman.h $(DRMHEADERS)
SAVAGEHEADERS= savage_drv.h savage_drm.h $(DRMHEADERS)
VIAHEADERS = via_drm.h via_drv.h via_3d_reg.h via_verifier.h $(DRMHEADERS)
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
deleted file mode 100644
index 8d991c42..00000000
--- a/linux-core/i915_buffer.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
-{
- return drm_agp_init_ttm(dev);
-}
-
-int i915_fence_type(struct drm_buffer_object *bo,
- uint32_t *fclass,
- uint32_t *type)
-{
- if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
- *type = 3;
- else
- *type = 1;
- return 0;
-}
-
-int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
-{
- /*
- * FIXME: Only emit once per batchbuffer submission.
- */
-
- uint32_t flush_cmd = MI_NO_WRITE_FLUSH;
-
- if (flags & DRM_BO_FLAG_READ)
- flush_cmd |= MI_READ_FLUSH;
- if (flags & DRM_BO_FLAG_EXE)
- flush_cmd |= MI_EXE_FLUSH;
-
- return i915_emit_mi_flush(dev, flush_cmd);
-}
-
-int i915_init_mem_type(struct drm_device *dev, uint32_t type,
- struct drm_mem_type_manager *man)
-{
- switch (type) {
- case DRM_BO_MEM_LOCAL:
- man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
- _DRM_FLAG_MEMTYPE_CACHED;
- man->drm_bus_maptype = 0;
- man->gpu_offset = 0;
- break;
- case DRM_BO_MEM_TT:
- if (!(drm_core_has_AGP(dev) && dev->agp)) {
- DRM_ERROR("AGP is not enabled for memory type %u\n",
- (unsigned)type);
- return -EINVAL;
- }
- man->io_offset = dev->agp->agp_info.aper_base;
- man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
- man->io_addr = NULL;
- man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
- _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
- man->drm_bus_maptype = _DRM_AGP;
- man->gpu_offset = 0;
- break;
- case DRM_BO_MEM_PRIV0:
- if (!(drm_core_has_AGP(dev) && dev->agp)) {
- DRM_ERROR("AGP is not enabled for memory type %u\n",
- (unsigned)type);
- return -EINVAL;
- }
- man->io_offset = dev->agp->agp_info.aper_base;
- man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
- man->io_addr = NULL;
- man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
- _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
- man->drm_bus_maptype = _DRM_AGP;
- man->gpu_offset = 0;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * i915_evict_flags:
- *
- * @bo: the buffer object to be evicted
- *
- * Return the bo flags for a buffer which is not mapped to the hardware.
- * These will be placed in proposed_flags so that when the move is
- * finished, they'll end up in bo->mem.flags
- */
-uint64_t i915_evict_flags(struct drm_buffer_object *bo)
-{
- switch (bo->mem.mem_type) {
- case DRM_BO_MEM_LOCAL:
- case DRM_BO_MEM_TT:
- return DRM_BO_FLAG_MEM_LOCAL;
- default:
- return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
- }
-}
-
-#if 0 /* See comment below */
-
-static void i915_emit_copy_blit(struct drm_device * dev,
- uint32_t src_offset,
- uint32_t dst_offset,
- uint32_t pages, int direction)
-{
- uint32_t cur_pages;
- uint32_t stride = PAGE_SIZE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- if (!dev_priv)
- return;
-
- i915_kernel_lost_context(dev);
- while (pages > 0) {
- cur_pages = pages;
- if (cur_pages > 2048)
- cur_pages = 2048;
- pages -= cur_pages;
-
- BEGIN_LP_RING(6);
- OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
- XY_SRC_COPY_BLT_WRITE_RGB);
- OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
- (1 << 25) | (direction ? (1 << 30) : 0));
- OUT_RING((cur_pages << 16) | PAGE_SIZE);
- OUT_RING(dst_offset);
- OUT_RING(stride & 0xffff);
- OUT_RING(src_offset);
- ADVANCE_LP_RING();
- }
- return;
-}
-
-static int i915_move_blit(struct drm_buffer_object * bo,
- int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
-{
- struct drm_bo_mem_reg *old_mem = &bo->mem;
- int dir = 0;
-
- if ((old_mem->mem_type == new_mem->mem_type) &&
- (new_mem->mm_node->start <
- old_mem->mm_node->start + old_mem->mm_node->size)) {
- dir = 1;
- }
-
- i915_emit_copy_blit(bo->dev,
- old_mem->mm_node->start << PAGE_SHIFT,
- new_mem->mm_node->start << PAGE_SHIFT,
- new_mem->num_pages, dir);
-
- i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);
-
- return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
- DRM_FENCE_TYPE_EXE |
- DRM_I915_FENCE_TYPE_RW,
- DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
-}
-
-/*
- * Flip destination ttm into cached-coherent AGP,
- * then blit and subsequently move out again.
- */
-
-static int i915_move_flip(struct drm_buffer_object * bo,
- int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
-{
- struct drm_device *dev = bo->dev;
- struct drm_bo_mem_reg tmp_mem;
- int ret;
-
- tmp_mem = *new_mem;
- tmp_mem.mm_node = NULL;
- tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
- DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
-
- ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
- if (ret)
- return ret;
-
- ret = drm_bind_ttm(bo->ttm, &tmp_mem);
- if (ret)
- goto out_cleanup;
-
- ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
- if (ret)
- goto out_cleanup;
-
- ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
-out_cleanup:
- if (tmp_mem.mm_node) {
- mutex_lock(&dev->struct_mutex);
- if (tmp_mem.mm_node != bo->pinned_node)
- drm_mm_put_block(tmp_mem.mm_node);
- tmp_mem.mm_node = NULL;
- mutex_unlock(&dev->struct_mutex);
- }
- return ret;
-}
-
-#endif
-
-/*
- * Disable i915_move_flip for now, since we can't guarantee that the hardware
- * lock is held here. To re-enable we need to make sure either
- * a) The X server is using DRM to submit commands to the ring, or
- * b) DRM can use the HP ring for these blits. This means i915 needs to
- * implement a new ring submission mechanism and fence class.
- */
-int i915_move(struct drm_buffer_object *bo,
- int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
-{
- struct drm_bo_mem_reg *old_mem = &bo->mem;
-
- if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
- return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
- if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
- return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- } else {
- if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
- return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- }
- return 0;
-}
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
-static inline void clflush(volatile void *__p)
-{
- asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-#endif
-
-static inline void drm_cache_flush_addr(void *virt)
-{
- int i;
-
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
- clflush(virt+i);
-}
-
-static inline void drm_cache_flush_page(struct page *p)
-{
- drm_cache_flush_addr(page_address(p));
-}
-
-void i915_flush_ttm(struct drm_ttm *ttm)
-{
- int i;
-
- if (!ttm)
- return;
-
- DRM_MEMORYBARRIER();
-
-#ifdef CONFIG_X86_32
- /* Hopefully nobody has built an x86-64 processor without clflush */
- if (!cpu_has_clflush) {
- wbinvd();
- DRM_MEMORYBARRIER();
- return;
- }
-#endif
-
- for (i = ttm->num_pages - 1; i >= 0; i--)
- drm_cache_flush_page(drm_ttm_get_page(ttm, i));
-
- DRM_MEMORYBARRIER();
-}
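
The i915_flush_ttm() routine removed above pushes every page of a TTM backing store out of the CPU caches with clflush, cache line by cache line, falling back to a full wbinvd on 32-bit CPUs that lack the instruction. A minimal x86 sketch of the per-page pattern, assuming a 4 KiB page and a fixed 64-byte cache-line stride in place of boot_cpu_data.x86_clflush_size:

    #include <stddef.h>

    #define PAGE_SIZE      4096
    #define CLFLUSH_STRIDE 64   /* stand-in for boot_cpu_data.x86_clflush_size */

    /* Flush one page from the CPU caches, one cache line at a time,
     * then fence so the flushes are globally visible (the role of the
     * driver's DRM_MEMORYBARRIER). */
    static void cache_flush_page(volatile char *virt)
    {
        size_t i;

        for (i = 0; i < PAGE_SIZE; i += CLFLUSH_STRIDE)
            asm volatile("clflush %0" : "+m" (virt[i]));
        asm volatile("mfence" ::: "memory");
    }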
diff --git a/linux-core/i915_compat.c b/linux-core/i915_compat.c
deleted file mode 100644
index f3e0a081..00000000
--- a/linux-core/i915_compat.c
+++ /dev/null
@@ -1,215 +0,0 @@
-#include "drmP.h"
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
-
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
-#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
-#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
-#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
-#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
-#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
-#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
-#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
-#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
-#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
-
-#define I915_IFPADDR 0x60
-#define I965_IFPADDR 0x70
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
-#define upper_32_bits(_val) (((u64)(_val)) >> 32)
-#endif
-
-static struct _i9xx_private_compat {
- void __iomem *flush_page;
- int resource_valid;
- struct resource ifp_resource;
-} i9xx_private;
-
-static struct _i8xx_private_compat {
- void *flush_page;
- struct page *page;
-} i8xx_private;
-
-static void
-intel_compat_align_resource(void *data, struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- return;
-}
-
-
-static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev)
-{
- int ret;
- ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE,
- PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
- intel_compat_align_resource, pdev);
- if (ret != 0)
- return ret;
-
- return 0;
-}
-
-static void intel_i915_setup_chipset_flush(struct pci_dev *pdev)
-{
- int ret;
- u32 temp;
-
- pci_read_config_dword(pdev, I915_IFPADDR, &temp);
- if (!(temp & 0x1)) {
- intel_alloc_chipset_flush_resource(pdev);
- i9xx_private.resource_valid = 1;
- pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
- } else {
- temp &= ~1;
-
- i9xx_private.resource_valid = 1;
- i9xx_private.ifp_resource.start = temp;
- i9xx_private.ifp_resource.end = temp + PAGE_SIZE;
- ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
- if (ret) {
- i9xx_private.resource_valid = 0;
- printk("Failed inserting resource into tree\n");
- }
- }
-}
-
-static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
-{
- u32 temp_hi, temp_lo;
- int ret;
-
- pci_read_config_dword(pdev, I965_IFPADDR + 4, &temp_hi);
- pci_read_config_dword(pdev, I965_IFPADDR, &temp_lo);
-
- if (!(temp_lo & 0x1)) {
-
- intel_alloc_chipset_flush_resource(pdev);
-
- i9xx_private.resource_valid = 1;
- pci_write_config_dword(pdev, I965_IFPADDR + 4,
- upper_32_bits(i9xx_private.ifp_resource.start));
- pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
- } else {
- u64 l64;
-
- temp_lo &= ~0x1;
- l64 = ((u64)temp_hi << 32) | temp_lo;
-
- i9xx_private.resource_valid = 1;
- i9xx_private.ifp_resource.start = l64;
- i9xx_private.ifp_resource.end = l64 + PAGE_SIZE;
- ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
- if (ret) {
- i9xx_private.resource_valid = 0;
- printk("Failed inserting resource into tree\n");
- }
- }
-}
-
-static void intel_i8xx_fini_flush(struct drm_device *dev)
-{
- kunmap(i8xx_private.page);
- i8xx_private.flush_page = NULL;
- unmap_page_from_agp(i8xx_private.page);
- flush_agp_mappings();
-
- __free_page(i8xx_private.page);
-}
-
-static void intel_i8xx_setup_flush(struct drm_device *dev)
-{
-
- i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
- if (!i8xx_private.page) {
- return;
- }
-
- /* make page uncached */
- map_page_into_agp(i8xx_private.page);
- flush_agp_mappings();
-
- i8xx_private.flush_page = kmap(i8xx_private.page);
- if (!i8xx_private.flush_page)
- intel_i8xx_fini_flush(dev);
-}
-
-
-static void intel_i8xx_flush_page(struct drm_device *dev)
-{
- unsigned int *pg = i8xx_private.flush_page;
- int i;
-
- /* HAI NUT CAN I HAZ HAMMER?? */
- for (i = 0; i < 256; i++)
- *(pg + i) = i;
-
- DRM_MEMORYBARRIER();
-}
-
-static void intel_i9xx_setup_flush(struct drm_device *dev)
-{
- struct pci_dev *agp_dev = dev->agp->agp_info.device;
-
- i9xx_private.ifp_resource.name = "GMCH IFPBAR";
- i9xx_private.ifp_resource.flags = IORESOURCE_MEM;
-
- /* Setup chipset flush for 915 */
- if (IS_I965G(dev) || IS_G33(dev)) {
- intel_i965_g33_setup_chipset_flush(agp_dev);
- } else {
- intel_i915_setup_chipset_flush(agp_dev);
- }
-
- if (i9xx_private.ifp_resource.start) {
- i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE);
- if (!i9xx_private.flush_page)
- printk("unable to ioremap flush page - no chipset flushing");
- }
-}
-
-static void intel_i9xx_fini_flush(struct drm_device *dev)
-{
- iounmap(i9xx_private.flush_page);
- if (i9xx_private.resource_valid)
- release_resource(&i9xx_private.ifp_resource);
- i9xx_private.resource_valid = 0;
-}
-
-static void intel_i9xx_flush_page(struct drm_device *dev)
-{
- if (i9xx_private.flush_page)
- writel(1, i9xx_private.flush_page);
-}
-
-void intel_init_chipset_flush_compat(struct drm_device *dev)
-{
- /* not flush on i8xx */
- if (IS_I9XX(dev))
- intel_i9xx_setup_flush(dev);
- else
- intel_i8xx_setup_flush(dev);
-
-}
-
-void intel_fini_chipset_flush_compat(struct drm_device *dev)
-{
- /* not flush on i8xx */
- if (IS_I9XX(dev))
- intel_i9xx_fini_flush(dev);
- else
- intel_i8xx_fini_flush(dev);
-}
-
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
- if (IS_I9XX(dev))
- intel_i9xx_flush_page(dev);
- else
- intel_i8xx_flush_page(dev);
-}
-#endif
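
This compat layer programs the GMCH "Intel Flush Page" (IFP) so drm_agp_chipset_flush() can force the chipset's global write buffer out to memory: i9xx parts get a dword written to an ioremapped flush page, i8xx parts get 256 dwords hammered through an uncached page. On i915-class chipsets the flush-page address lives in a single config dword at 0x60; on 965/G33 it is split across two dwords at 0x70/0x74, with bit 0 of the low dword doubling as the enable flag. A hedged sketch of just the 965-style address handling (the config-space reads themselves would come from pci_read_config_dword):

    #include <stdint.h>

    #define I965_IFPADDR 0x70   /* low dword; offset +4 holds the upper 32 bits */

    /* Reassemble the flush-page physical address from the two config
     * dwords. Bit 0 of the low dword is the enable bit, not part of
     * the address, so it must be masked off first. */
    static uint64_t ifp_address(uint32_t lo, uint32_t hi, int *enabled)
    {
        *enabled = lo & 0x1;
        lo &= ~(uint32_t)0x1;
        return ((uint64_t)hi << 32) | lo;
    }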
diff --git a/linux-core/i915_dma.c b/linux-core/i915_dma.c
deleted file mode 120000
index c61d967e..00000000
--- a/linux-core/i915_dma.c
+++ /dev/null
@@ -1 +0,0 @@
-../shared-core/i915_dma.c
\ No newline at end of file
diff --git a/linux-core/i915_drm.h b/linux-core/i915_drm.h
deleted file mode 120000
index ed53f01d..00000000
--- a/linux-core/i915_drm.h
+++ /dev/null
@@ -1 +0,0 @@
-../shared-core/i915_drm.h
\ No newline at end of file
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
deleted file mode 100644
index 28923201..00000000
--- a/linux-core/i915_drv.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
- */
-/*
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#include "drm_pciids.h"
-
-static struct pci_device_id pciidlist[] = {
- i915_PCI_IDS
-};
-
-#ifdef I915_HAVE_FENCE
-extern struct drm_fence_driver i915_fence_driver;
-#endif
-
-#ifdef I915_HAVE_BUFFER
-
-static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
-static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
-
-static struct drm_bo_driver i915_bo_driver = {
- .mem_type_prio = i915_mem_prios,
- .mem_busy_prio = i915_busy_prios,
- .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
- .num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
- .create_ttm_backend_entry = i915_create_ttm_backend_entry,
- .fence_type = i915_fence_type,
- .invalidate_caches = i915_invalidate_caches,
- .init_mem_type = i915_init_mem_type,
- .evict_flags = i915_evict_flags,
- .move = i915_move,
- .ttm_cache_flush = i915_flush_ttm,
- .command_stream_barrier = NULL,
-};
-#endif
-
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!dev || !dev_priv) {
- printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
- printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
- if (state.event == PM_EVENT_PRETHAW)
- return 0;
-
- pci_save_state(dev->pdev);
-
- i915_save_state(dev);
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
- intel_opregion_free(dev);
-#endif
-
- if (state.event == PM_EVENT_SUSPEND) {
- /* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
- }
-
- return 0;
-}
-
-static int i915_resume(struct drm_device *dev)
-{
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev))
- return -1;
- pci_set_master(dev->pdev);
-
- i915_restore_state(dev);
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
- intel_opregion_init(dev);
-#endif
-
- return 0;
-}
-
-static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void remove(struct pci_dev *pdev);
-
-static struct drm_driver driver = {
- /* don't use mtrr's here, the Xserver or user space app should
- * deal with them for intel hardware.
- */
- .driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
- .load = i915_driver_load,
- .unload = i915_driver_unload,
- .firstopen = i915_driver_firstopen,
- .open = i915_driver_open,
- .lastclose = i915_driver_lastclose,
- .preclose = i915_driver_preclose,
- .postclose = i915_driver_postclose,
- .suspend = i915_suspend,
- .resume = i915_resume,
- .device_is_agp = i915_driver_device_is_agp,
- .get_vblank_counter = i915_get_vblank_counter,
- .enable_vblank = i915_enable_vblank,
- .disable_vblank = i915_disable_vblank,
- .irq_preinstall = i915_driver_irq_preinstall,
- .irq_postinstall = i915_driver_irq_postinstall,
- .irq_uninstall = i915_driver_irq_uninstall,
- .irq_handler = i915_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
- .get_map_ofs = drm_core_get_map_ofs,
- .get_reg_ofs = drm_core_get_reg_ofs,
- .proc_init = i915_gem_proc_init,
- .proc_cleanup = i915_gem_proc_cleanup,
- .ioctls = i915_ioctls,
- .gem_init_object = i915_gem_init_object,
- .gem_free_object = i915_gem_free_object,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
- .compat_ioctl = i915_compat_ioctl,
-#endif
- },
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = probe,
- .remove = remove,
- },
-#ifdef I915_HAVE_FENCE
- .fence_driver = &i915_fence_driver,
-#endif
-#ifdef I915_HAVE_BUFFER
- .bo_driver = &i915_bo_driver,
-#endif
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret;
-
- /* On the 945G/GM, the chipset reports the MSI capability on the
- * integrated graphics even though the support isn't actually there
- * according to the published specs. It doesn't appear to function
- * correctly in testing on 945G.
- * This may be a side effect of MSI having been made available for PEG
- * and the registers being closely associated.
- */
- if (pdev->device != 0x2772 && pdev->device != 0x27A2)
- (void )pci_enable_msi(pdev);
-
- ret = drm_get_dev(pdev, ent, &driver);
- if (ret && pdev->msi_enabled)
- pci_disable_msi(pdev);
- return ret;
-}
-static void remove(struct pci_dev *pdev)
-{
- if (pdev->msi_enabled)
- pci_disable_msi(pdev);
- drm_cleanup_pci(pdev);
-}
-
-static int __init i915_init(void)
-{
- driver.num_ioctls = i915_max_ioctl;
- return drm_init(&driver, pciidlist);
-}
-
-static void __exit i915_exit(void)
-{
- drm_exit(&driver);
-}
-
-module_init(i915_init);
-module_exit(i915_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/linux-core/i915_drv.h b/linux-core/i915_drv.h
deleted file mode 120000
index 085558ca..00000000
--- a/linux-core/i915_drv.h
+++ /dev/null
@@ -1 +0,0 @@
-../shared-core/i915_drv.h
\ No newline at end of file
diff --git a/linux-core/i915_execbuf.c b/linux-core/i915_execbuf.c
deleted file mode 100644
index 804f3ac1..00000000
--- a/linux-core/i915_execbuf.c
+++ /dev/null
@@ -1,917 +0,0 @@
-/*
- * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- * Dave Airlie
- * Keith Packard
- * ... ?
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#if DRM_DEBUG_CODE
-#define DRM_DEBUG_RELOCATION (drm_debug != 0)
-#else
-#define DRM_DEBUG_RELOCATION 0
-#endif
-
-enum i915_buf_idle {
- I915_RELOC_UNCHECKED,
- I915_RELOC_IDLE,
- I915_RELOC_BUSY
-};
-
-struct i915_relocatee_info {
- struct drm_buffer_object *buf;
- unsigned long offset;
- uint32_t *data_page;
- unsigned page_offset;
- struct drm_bo_kmap_obj kmap;
- int is_iomem;
- int dst;
- int idle;
- int performed_ring_relocs;
-#ifdef DRM_KMAP_ATOMIC_PROT_PFN
- unsigned long pfn;
- pgprot_t pg_prot;
-#endif
-};
-
-struct drm_i915_validate_buffer {
- struct drm_buffer_object *buffer;
- int presumed_offset_correct;
- void __user *data;
- int ret;
- enum i915_buf_idle idle;
-};
-
-/*
- * I'd like to use MI_STORE_DATA_IMM here, but I can't make
- * it work. Seems like GART writes are broken with that
- * instruction. Also I'm not sure that MI_FLUSH will
- * act as a memory barrier for that instruction. It will
- * for this single dword 2D blit.
- */
-
-static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
- uint32_t value)
-{
- struct drm_i915_private *dev_priv =
- (struct drm_i915_private *)dev->dev_private;
-
- RING_LOCALS;
- i915_kernel_lost_context(dev);
- BEGIN_LP_RING(6);
- OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
- OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
- OUT_RING((0x1 << 16) | (0x4));
- OUT_RING(offset);
- OUT_RING(value);
- OUT_RING(0);
- ADVANCE_LP_RING();
-}
-
-static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
- *buffers, unsigned num_buffers)
-{
- while (num_buffers--)
- drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
-}
-
-int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
- struct drm_i915_validate_buffer *buffers,
- struct i915_relocatee_info *relocatee, uint32_t * reloc)
-{
- unsigned index;
- unsigned long new_cmd_offset;
- u32 val;
- int ret, i;
- int buf_index = -1;
-
- /*
- * FIXME: O(relocs * buffers) complexity.
- */
-
- for (i = 0; i <= num_buffers; i++)
- if (buffers[i].buffer)
- if (reloc[2] == buffers[i].buffer->base.hash.key)
- buf_index = i;
-
- if (buf_index == -1) {
- DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
- return -EINVAL;
- }
-
- /*
- * Short-circuit relocations that were correctly
- * guessed by the client
- */
- if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
- return 0;
-
- new_cmd_offset = reloc[0];
- if (!relocatee->data_page ||
- !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
- struct drm_bo_mem_reg *mem = &relocatee->buf->mem;
-
- drm_bo_kunmap(&relocatee->kmap);
- relocatee->data_page = NULL;
- relocatee->offset = new_cmd_offset;
-
- if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
- ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
- if (ret)
- return ret;
- relocatee->idle = I915_RELOC_IDLE;
- }
-
- if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
- (mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
- drm_bo_evict_cached(relocatee->buf);
-
- ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
- 1, &relocatee->kmap);
- if (ret) {
- DRM_ERROR
- ("Could not map command buffer to apply relocs\n %08lx",
- new_cmd_offset);
- return ret;
- }
- relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
- &relocatee->is_iomem);
- relocatee->page_offset = (relocatee->offset & PAGE_MASK);
- }
-
- val = buffers[buf_index].buffer->offset;
- index = (reloc[0] - relocatee->page_offset) >> 2;
-
- /* add in validate */
- val = val + reloc[1];
-
- if (DRM_DEBUG_RELOCATION) {
- if (buffers[buf_index].presumed_offset_correct &&
- relocatee->data_page[index] != val) {
- DRM_DEBUG
- ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
- reloc[0], reloc[1], buf_index,
- relocatee->data_page[index], val);
- }
- }
-
- if (relocatee->is_iomem)
- iowrite32(val, relocatee->data_page + index);
- else
- relocatee->data_page[index] = val;
- return 0;
-}
-
-int i915_process_relocs(struct drm_file *file_priv,
- uint32_t buf_handle,
- uint32_t __user ** reloc_user_ptr,
- struct i915_relocatee_info *relocatee,
- struct drm_i915_validate_buffer *buffers,
- uint32_t num_buffers)
-{
- int ret, reloc_stride;
- uint32_t cur_offset;
- uint32_t reloc_count;
- uint32_t reloc_type;
- uint32_t reloc_buf_size;
- uint32_t *reloc_buf = NULL;
- int i;
-
- /* do a copy from user from the user ptr */
- ret = get_user(reloc_count, *reloc_user_ptr);
- if (ret) {
- DRM_ERROR("Could not map relocation buffer.\n");
- goto out;
- }
-
- ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
- if (ret) {
- DRM_ERROR("Could not map relocation buffer.\n");
- goto out;
- }
-
- if (reloc_type != 0) {
- DRM_ERROR("Unsupported relocation type requested\n");
- ret = -EINVAL;
- goto out;
- }
-
- reloc_buf_size =
- (I915_RELOC_HEADER +
- (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
- reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
- if (!reloc_buf) {
- DRM_ERROR("Out of memory for reloc buffer\n");
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
- ret = -EFAULT;
- goto out;
- }
-
- /* get next relocate buffer handle */
- *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2];
-
- reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */
-
- DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
- *reloc_user_ptr);
-
- for (i = 0; i < reloc_count; i++) {
- cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
-
- ret = i915_apply_reloc(file_priv, num_buffers, buffers,
- relocatee, reloc_buf + cur_offset);
- if (ret)
- goto out;
- }
-
- out:
- if (reloc_buf)
- kfree(reloc_buf);
-
- if (relocatee->data_page) {
- drm_bo_kunmap(&relocatee->kmap);
- relocatee->data_page = NULL;
- }
-
- return ret;
-}
-
-static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
- uint32_t __user * reloc_user_ptr,
- struct drm_i915_validate_buffer *buffers,
- uint32_t buf_count)
-{
- struct drm_device *dev = file_priv->minor->dev;
- struct i915_relocatee_info relocatee;
- int ret = 0;
- int b;
-
- /*
- * Short circuit relocations when all previous
- * buffers offsets were correctly guessed by
- * the client
- */
- if (!DRM_DEBUG_RELOCATION) {
- for (b = 0; b < buf_count; b++)
- if (!buffers[b].presumed_offset_correct)
- break;
-
- if (b == buf_count)
- return 0;
- }
-
- memset(&relocatee, 0, sizeof(relocatee));
- relocatee.idle = I915_RELOC_UNCHECKED;
-
- mutex_lock(&dev->struct_mutex);
- relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
- mutex_unlock(&dev->struct_mutex);
- if (!relocatee.buf) {
- DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
- ret = -EINVAL;
- goto out_err;
- }
-
- mutex_lock(&relocatee.buf->mutex);
- while (reloc_user_ptr) {
- ret =
- i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr,
- &relocatee, buffers, buf_count);
- if (ret) {
- DRM_ERROR("process relocs failed\n");
- goto out_err1;
- }
- }
-
- out_err1:
- mutex_unlock(&relocatee.buf->mutex);
- drm_bo_usage_deref_unlocked(&relocatee.buf);
- out_err:
- return ret;
-}
-
-static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
-{
- if (relocatee->data_page) {
-#ifndef DRM_KMAP_ATOMIC_PROT_PFN
- drm_bo_kunmap(&relocatee->kmap);
-#else
- kunmap_atomic(relocatee->data_page, KM_USER0);
-#endif
- relocatee->data_page = NULL;
- }
- relocatee->buf = NULL;
- relocatee->dst = ~0;
-}
-
-static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
- struct drm_i915_validate_buffer *buffers,
- unsigned int dst, unsigned long dst_offset)
-{
- int ret;
-
- if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
- i915_clear_relocatee(relocatee);
- relocatee->dst = dst;
- relocatee->buf = buffers[dst].buffer;
- relocatee->idle = buffers[dst].idle;
-
- /*
- * Check for buffer idle. If the buffer is busy, revert to
- * ring relocations.
- */
-
- if (relocatee->idle == I915_RELOC_UNCHECKED) {
- preempt_enable();
- mutex_lock(&relocatee->buf->mutex);
-
- ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
- if (ret == 0)
- relocatee->idle = I915_RELOC_IDLE;
- else {
- relocatee->idle = I915_RELOC_BUSY;
- relocatee->performed_ring_relocs = 1;
- }
- mutex_unlock(&relocatee->buf->mutex);
- preempt_disable();
- buffers[dst].idle = relocatee->idle;
- }
- }
-
- if (relocatee->idle == I915_RELOC_BUSY)
- return 0;
-
- if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
- DRM_ERROR("Relocation destination out of bounds.\n");
- return -EINVAL;
- }
- if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
- NULL == relocatee->data_page)) {
-#ifdef DRM_KMAP_ATOMIC_PROT_PFN
- if (NULL != relocatee->data_page) {
- kunmap_atomic(relocatee->data_page, KM_USER0);
- relocatee->data_page = NULL;
- }
- ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
- &relocatee->pfn, &relocatee->pg_prot);
- if (ret) {
- DRM_ERROR("Can't map relocation destination.\n");
- return -EINVAL;
- }
- relocatee->data_page =
- kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
- relocatee->pg_prot);
-#else
- if (NULL != relocatee->data_page) {
- drm_bo_kunmap(&relocatee->kmap);
- relocatee->data_page = NULL;
- }
-
- ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
- 1, &relocatee->kmap);
- if (ret) {
- DRM_ERROR("Can't map relocation destination.\n");
- return ret;
- }
-
- relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
- &relocatee->is_iomem);
-#endif
- relocatee->page_offset = dst_offset & PAGE_MASK;
- }
- return 0;
-}
-
-static int i915_apply_post_reloc(uint32_t reloc[],
- struct drm_i915_validate_buffer *buffers,
- uint32_t num_buffers,
- struct i915_relocatee_info *relocatee)
-{
- uint32_t reloc_buffer = reloc[2];
- uint32_t dst_buffer = reloc[3];
- uint32_t val;
- uint32_t index;
- int ret;
-
- if (likely(buffers[reloc_buffer].presumed_offset_correct))
- return 0;
- if (unlikely(reloc_buffer >= num_buffers)) {
- DRM_ERROR("Invalid reloc buffer index.\n");
- return -EINVAL;
- }
- if (unlikely(dst_buffer >= num_buffers)) {
- DRM_ERROR("Invalid dest buffer index.\n");
- return -EINVAL;
- }
-
- ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
- if (unlikely(ret))
- return ret;
-
- val = buffers[reloc_buffer].buffer->offset;
- index = (reloc[0] - relocatee->page_offset) >> 2;
- val = val + reloc[1];
-
- if (relocatee->idle == I915_RELOC_BUSY) {
- i915_emit_ring_reloc(relocatee->buf->dev,
- relocatee->buf->offset + reloc[0], val);
- return 0;
- }
-#ifdef DRM_KMAP_ATOMIC_PROT_PFN
- relocatee->data_page[index] = val;
-#else
- if (likely(relocatee->is_iomem))
- iowrite32(val, relocatee->data_page + index);
- else
- relocatee->data_page[index] = val;
-#endif
-
- return 0;
-}
-
-static int i915_post_relocs(struct drm_file *file_priv,
- uint32_t __user * new_reloc_ptr,
- struct drm_i915_validate_buffer *buffers,
- unsigned int num_buffers)
-{
- uint32_t *reloc;
- uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
- uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
- struct i915_relocatee_info relocatee;
- uint32_t reloc_type;
- uint32_t num_relocs;
- uint32_t count;
- int ret = 0;
- int i;
- int short_circuit = 1;
- uint32_t __user *reloc_ptr;
- uint64_t new_reloc_data;
- uint32_t reloc_buf_size;
- uint32_t *reloc_buf;
-
- for (i = 0; i < num_buffers; ++i) {
- if (unlikely(!buffers[i].presumed_offset_correct)) {
- short_circuit = 0;
- break;
- }
- }
-
- if (likely(short_circuit))
- return 0;
-
- memset(&relocatee, 0, sizeof(relocatee));
-
- while (new_reloc_ptr) {
- reloc_ptr = new_reloc_ptr;
-
- ret = get_user(num_relocs, reloc_ptr);
- if (unlikely(ret))
- goto out;
- if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
- header_size +
- num_relocs * reloc_stride)))
- return -EFAULT;
-
- ret = __get_user(reloc_type, reloc_ptr + 1);
- if (unlikely(ret))
- goto out;
-
- if (unlikely(reloc_type != 1)) {
- DRM_ERROR("Unsupported relocation type requested.\n");
- ret = -EINVAL;
- goto out;
- }
-
- ret = __get_user(new_reloc_data, reloc_ptr + 2);
- new_reloc_ptr = (uint32_t __user *) (unsigned long)
- new_reloc_data;
-
- reloc_ptr += I915_RELOC_HEADER;
-
- if (num_relocs == 0)
- goto out;
-
- reloc_buf_size =
- (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
- reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
- if (!reloc_buf) {
- DRM_ERROR("Out of memory for reloc buffer\n");
- ret = -ENOMEM;
- goto out;
- }
-
- if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
- ret = -EFAULT;
- goto out;
- }
- reloc = reloc_buf;
- preempt_disable();
- for (count = 0; count < num_relocs; ++count) {
- ret = i915_apply_post_reloc(reloc, buffers,
- num_buffers, &relocatee);
- if (unlikely(ret)) {
- preempt_enable();
- goto out;
- }
- reloc += I915_RELOC0_STRIDE;
- }
- preempt_enable();
-
- if (reloc_buf) {
- kfree(reloc_buf);
- reloc_buf = NULL;
- }
- i915_clear_relocatee(&relocatee);
- }
-
- out:
- /*
- * Flush ring relocs so the command parser will pick them up.
- */
-
- if (relocatee.performed_ring_relocs)
- (void)i915_emit_mi_flush(file_priv->minor->dev, 0);
-
- i915_clear_relocatee(&relocatee);
- if (reloc_buf) {
- kfree(reloc_buf);
- reloc_buf = NULL;
- }
-
- return ret;
-}
-
-static int i915_check_presumed(struct drm_i915_op_arg *arg,
- struct drm_buffer_object *bo,
- uint32_t __user * data, int *presumed_ok)
-{
- struct drm_bo_op_req *req = &arg->d.req;
- uint32_t hint_offset;
- uint32_t hint = req->bo_req.hint;
-
- *presumed_ok = 0;
-
- if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
- return 0;
- if (bo->offset == req->bo_req.presumed_offset) {
- *presumed_ok = 1;
- return 0;
- }
-
- /*
- * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
- * the user-space IOCTL argument list, since the buffer has moved,
- * we're about to apply relocations and we might subsequently
- * hit an -EAGAIN. In that case the argument list will be reused by
- * user-space, but the presumed offset is no longer valid.
- *
- * Needless to say, this is a bit ugly.
- */
-
- hint_offset = (uint32_t *) & req->bo_req.hint - (uint32_t *) arg;
- hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
- return __put_user(hint, data + hint_offset);
-}
-
-/*
- * Validate, add fence and relocate a block of bos from a userspace list
- */
-int i915_validate_buffer_list(struct drm_file *file_priv,
- unsigned int fence_class, uint64_t data,
- struct drm_i915_validate_buffer *buffers,
- uint32_t * num_buffers,
- uint32_t __user ** post_relocs)
-{
- struct drm_i915_op_arg arg;
- struct drm_bo_op_req *req = &arg.d.req;
- int ret = 0;
- unsigned buf_count = 0;
- uint32_t buf_handle;
- uint32_t __user *reloc_user_ptr;
- struct drm_i915_validate_buffer *item = buffers;
- *post_relocs = NULL;
-
- do {
- if (buf_count >= *num_buffers) {
- DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
- ret = -EINVAL;
- goto out_err;
- }
- item = buffers + buf_count;
- item->buffer = NULL;
- item->presumed_offset_correct = 0;
- item->idle = I915_RELOC_UNCHECKED;
-
- if (copy_from_user
- (&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
- ret = -EFAULT;
- goto out_err;
- }
-
- ret = 0;
- if (req->op != drm_bo_validate) {
- DRM_ERROR
- ("Buffer object operation wasn't \"validate\".\n");
- ret = -EINVAL;
- goto out_err;
- }
- item->ret = 0;
- item->data = (void __user *)(unsigned long)data;
-
- buf_handle = req->bo_req.handle;
- reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr;
-
- /*
- * Switch mode to post-validation relocations?
- */
-
- if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
- (reloc_user_ptr != NULL))) {
- uint32_t reloc_type;
-
- ret = get_user(reloc_type, reloc_user_ptr + 1);
- if (ret)
- goto out_err;
-
- if (reloc_type == 1)
- *post_relocs = reloc_user_ptr;
-
- }
-
- if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
- ret =
- i915_exec_reloc(file_priv, buf_handle,
- reloc_user_ptr, buffers, buf_count);
- if (ret)
- goto out_err;
- DRM_MEMORYBARRIER();
- }
-
- ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
- req->bo_req.flags,
- req->bo_req.mask, req->bo_req.hint,
- req->bo_req.fence_class,
- NULL, &item->buffer);
- if (ret) {
- DRM_ERROR("error on handle validate %d\n", ret);
- goto out_err;
- }
-
- buf_count++;
-
- ret = i915_check_presumed(&arg, item->buffer,
- (uint32_t __user *)
- (unsigned long)data,
- &item->presumed_offset_correct);
- if (ret)
- goto out_err;
-
- data = arg.next;
- } while (data != 0);
- out_err:
- *num_buffers = buf_count;
- item->ret = (ret != -EAGAIN) ? ret : 0;
- return ret;
-}
-
-/*
- * Remove all buffers from the unfenced list.
- * If the execbuffer operation was aborted, for example due to a signal,
- * this also make sure that buffers retain their original state and
- * fence pointers.
- * Copy back buffer information to user-space unless we were interrupted
- * by a signal. In which case the IOCTL must be rerun.
- */
-
-static int i915_handle_copyback(struct drm_device *dev,
- struct drm_i915_validate_buffer *buffers,
- unsigned int num_buffers, int ret)
-{
- int err = ret;
- int i;
- struct drm_i915_op_arg arg;
- struct drm_buffer_object *bo;
-
- if (ret)
- drm_putback_buffer_objects(dev);
-
- if (ret != -EAGAIN) {
- for (i = 0; i < num_buffers; ++i) {
- arg.handled = 1;
- arg.d.rep.ret = buffers->ret;
- bo = buffers->buffer;
- mutex_lock(&bo->mutex);
- drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
- mutex_unlock(&bo->mutex);
- if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
- err = -EFAULT;
- buffers++;
- }
- }
-
- return err;
-}
-
-/*
- * Create a fence object, and if that fails, pretend that everything is
- * OK and just idle the GPU.
- */
-
-void i915_fence_or_sync(struct drm_file *file_priv,
- uint32_t fence_flags,
- struct drm_fence_arg *fence_arg,
- struct drm_fence_object **fence_p)
-{
- struct drm_device *dev = file_priv->minor->dev;
- int ret;
- struct drm_fence_object *fence;
-
- ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);
-
- if (ret) {
-
- /*
- * Fence creation failed.
- * Fall back to synchronous operation and idle the engine.
- */
-
- (void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
- (void)i915_quiescent(dev);
-
- if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-
- /*
- * Communicate to user-space that
- * fence creation has failed and that
- * the engine is idle.
- */
-
- fence_arg->handle = ~0;
- fence_arg->error = ret;
- }
- drm_putback_buffer_objects(dev);
- if (fence_p)
- *fence_p = NULL;
- return;
- }
-
- if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
-
- ret = drm_fence_add_user_object(file_priv, fence,
- fence_flags &
- DRM_FENCE_FLAG_SHAREABLE);
- if (!ret)
- drm_fence_fill_arg(fence, fence_arg);
- else {
- /*
- * Fence user object creation failed.
- * We must idle the engine here as well, as user-
- * space expects a fence object to wait on. Since we
- * have a fence object we wait for it to signal
- * to indicate engine "sufficiently" idle.
- */
-
- (void)drm_fence_object_wait(fence, 0, 1, fence->type);
- drm_fence_usage_deref_unlocked(&fence);
- fence_arg->handle = ~0;
- fence_arg->error = ret;
- }
- }
-
- if (fence_p)
- *fence_p = fence;
- else if (fence)
- drm_fence_usage_deref_unlocked(&fence);
-}
-
-int i915_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
- dev_priv->sarea_priv;
- struct drm_i915_execbuffer *exec_buf = data;
- struct drm_i915_batchbuffer *batch = &exec_buf->batch;
- struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
- int num_buffers;
- int ret;
- uint32_t __user *post_relocs;
-
- if (!dev_priv->allow_batchbuffer) {
- DRM_ERROR("Batchbuffer ioctl disabled\n");
- return -EINVAL;
- }
-
- if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
- batch->num_cliprects *
- sizeof(struct
- drm_clip_rect)))
- return -EFAULT;
-
- if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
- return -EINVAL;
-
- ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
- if (ret)
- return ret;
-
- /*
- * The cmdbuf_mutex makes sure the validate-submit-fence
- * operation is atomic.
- */
-
- ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
- if (ret) {
- drm_bo_read_unlock(&dev->bm.bm_lock);
- return -EAGAIN;
- }
-
- num_buffers = exec_buf->num_buffers;
-
- if (!dev_priv->val_bufs) {
- dev_priv->val_bufs =
- vmalloc(sizeof(struct drm_i915_validate_buffer) *
- dev_priv->max_validate_buffers);
- }
- if (!dev_priv->val_bufs) {
- drm_bo_read_unlock(&dev->bm.bm_lock);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
- return -ENOMEM;
- }
-
- /* validate buffer list + fixup relocations */
- ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
- dev_priv->val_bufs, &num_buffers,
- &post_relocs);
- if (ret)
- goto out_err0;
-
- if (post_relocs) {
- ret = i915_post_relocs(file_priv, post_relocs,
- dev_priv->val_bufs, num_buffers);
- if (ret)
- goto out_err0;
- }
-
- /* make sure all previous memory operations have passed */
- DRM_MEMORYBARRIER();
-
- if (!post_relocs) {
- drm_agp_chipset_flush(dev);
- batch->start =
- dev_priv->val_bufs[num_buffers - 1].buffer->offset;
- } else {
- batch->start += dev_priv->val_bufs[0].buffer->offset;
- }
-
- DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
- batch->start, batch->used, batch->num_cliprects);
-
- ret = i915_dispatch_batchbuffer(dev, batch);
- if (ret)
- goto out_err0;
- if (sarea_priv)
- sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
- i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
-
- out_err0:
- ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
- mutex_lock(&dev->struct_mutex);
- i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
- mutex_unlock(&dev->struct_mutex);
- mutex_unlock(&dev_priv->cmdbuf_mutex);
- drm_bo_read_unlock(&dev->bm.bm_lock);
- return ret;
-}
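
The relocation paths above consume a user-space chain of relocation lists: each list begins with a small dword header (entry count, relocation type, and a user pointer to the next list, with 0 terminating the chain), followed by fixed-stride entries. For type-0 relocations the kernel reads entry[0] as the byte offset to patch, entry[1] as a delta added to the target buffer's final offset, and entry[2] as the target buffer handle; type-1 "post-validation" entries carry buffer indices in entry[2] and entry[3] instead. A sketch of that layout, with field widths inferred from the deleted code rather than taken from a published header:

    #include <stdint.h>

    /* One chained relocation list as i915_process_relocs() walks it.
     * The header and entry sizes correspond to I915_RELOC_HEADER and
     * I915_RELOC0_STRIDE dwords in the driver. */
    struct i915_reloc_list_header {
        uint32_t count;   /* number of entries that follow */
        uint32_t type;    /* 0: handle-based; 1: index-based "post" relocs */
        uint64_t next;    /* user pointer to the next list; 0 terminates */
    };

    struct i915_reloc0_entry {
        uint32_t offset;  /* byte offset in the buffer to patch */
        uint32_t delta;   /* added to the target buffer's final offset */
        uint32_t target;  /* type 0: buffer handle; type 1: buffer index */
        uint32_t dst;     /* type 1 only: index of the buffer being patched */
    };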
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
deleted file mode 100644
index 45613c3a..00000000
--- a/linux-core/i915_fence.c
+++ /dev/null
@@ -1,273 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-/*
- * Initiate a sync flush if it's not already pending.
- */
-
-static inline void i915_initiate_rwflush(struct drm_i915_private *dev_priv,
- struct drm_fence_class_manager *fc)
-{
- if ((fc->pending_flush & DRM_I915_FENCE_TYPE_RW) &&
- !dev_priv->flush_pending) {
- dev_priv->flush_sequence = (uint32_t) READ_BREADCRUMB(dev_priv);
- dev_priv->flush_flags = fc->pending_flush;
- dev_priv->saved_flush_status = READ_HWSP(dev_priv, 0);
- I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- dev_priv->flush_pending = 1;
- fc->pending_flush &= ~DRM_I915_FENCE_TYPE_RW;
- }
-}
-
-static inline void i915_report_rwflush(struct drm_device *dev,
- struct drm_i915_private *dev_priv)
-{
- if (unlikely(dev_priv->flush_pending)) {
-
- uint32_t flush_flags;
- uint32_t i_status;
- uint32_t flush_sequence;
-
- i_status = READ_HWSP(dev_priv, 0);
- if ((i_status & (1 << 12)) !=
- (dev_priv->saved_flush_status & (1 << 12))) {
- flush_flags = dev_priv->flush_flags;
- flush_sequence = dev_priv->flush_sequence;
- dev_priv->flush_pending = 0;
- drm_fence_handler(dev, 0, flush_sequence,
- flush_flags, 0);
- }
- }
-}
-
-static void i915_fence_flush(struct drm_device *dev,
- uint32_t fence_class)
-{
- struct drm_i915_private *dev_priv =
- (struct drm_i915_private *) dev->dev_private;
- struct drm_fence_manager *fm = &dev->fm;
- struct drm_fence_class_manager *fc = &fm->fence_class[0];
- unsigned long irq_flags;
-
- if (unlikely(!dev_priv))
- return;
-
- write_lock_irqsave(&fm->lock, irq_flags);
- i915_initiate_rwflush(dev_priv, fc);
- write_unlock_irqrestore(&fm->lock, irq_flags);
-}
-
-
-static void i915_fence_poll(struct drm_device *dev, uint32_t fence_class,
- uint32_t waiting_types)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct drm_fence_manager *fm = &dev->fm;
- struct drm_fence_class_manager *fc = &fm->fence_class[0];
- uint32_t sequence;
-
- if (unlikely(!dev_priv))
- return;
-
- /*
- * First, report any executed sync flush:
- */
-
- i915_report_rwflush(dev, dev_priv);
-
- /*
- * Report A new breadcrumb, and adjust IRQs.
- */
-
- if (waiting_types & DRM_FENCE_TYPE_EXE) {
-
- sequence = READ_BREADCRUMB(dev_priv);
- drm_fence_handler(dev, 0, sequence,
- DRM_FENCE_TYPE_EXE, 0);
-
- if (dev_priv->fence_irq_on &&
- !(fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
- i915_user_irq_off(dev_priv);
- dev_priv->fence_irq_on = 0;
- } else if (!dev_priv->fence_irq_on &&
- (fc->waiting_types & DRM_FENCE_TYPE_EXE)) {
- i915_user_irq_on(dev_priv);
- dev_priv->fence_irq_on = 1;
- }
- }
-
- /*
- * There may be new RW flushes pending. Start them.
- */
-
- i915_initiate_rwflush(dev_priv, fc);
-
- /*
- * And possibly, but unlikely, they finish immediately.
- */
-
- i915_report_rwflush(dev, dev_priv);
-
-}
-
-static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
- uint32_t flags, uint32_t *sequence,
- uint32_t *native_type)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (unlikely(!dev_priv))
- return -EINVAL;
-
- i915_emit_irq(dev);
- *sequence = (uint32_t) dev_priv->counter;
- *native_type = DRM_FENCE_TYPE_EXE;
- if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
- *native_type |= DRM_I915_FENCE_TYPE_RW;
-
- return 0;
-}
-
-void i915_fence_handler(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
- struct drm_fence_manager *fm = &dev->fm;
- struct drm_fence_class_manager *fc = &fm->fence_class[0];
-
- write_lock(&fm->lock);
- if (likely(dev_priv->fence_irq_on))
- i915_fence_poll(dev, 0, fc->waiting_types);
- write_unlock(&fm->lock);
-}
-
-/*
- * We need a separate wait function since we need to poll for
- * sync flushes.
- */
-
-static int i915_fence_wait(struct drm_fence_object *fence,
- int lazy, int interruptible, uint32_t mask)
-{
- struct drm_device *dev = fence->dev;
- drm_i915_private_t *dev_priv = (struct drm_i915_private *) dev->dev_private;
- struct drm_fence_manager *fm = &dev->fm;
- struct drm_fence_class_manager *fc = &fm->fence_class[0];
- int ret;
- unsigned long _end = jiffies + 3 * DRM_HZ;
-
- drm_fence_object_flush(fence, mask);
- if (likely(interruptible))
- ret = wait_event_interruptible_timeout
- (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
- 3 * DRM_HZ);
- else
- ret = wait_event_timeout
- (fc->fence_queue, drm_fence_object_signaled(fence, DRM_FENCE_TYPE_EXE),
- 3 * DRM_HZ);
-
- if (unlikely(ret == -ERESTARTSYS))
- return -EAGAIN;
-
- if (unlikely(ret == 0))
- return -EBUSY;
-
- if (likely(mask == DRM_FENCE_TYPE_EXE ||
- drm_fence_object_signaled(fence, mask)))
- return 0;
-
- /*
- * Remove this code snippet when fixed. HWSTAM doesn't let
- * flush info through...
- */
-
- if (unlikely(dev_priv && !dev_priv->irq_enabled)) {
- unsigned long irq_flags;
-
- DRM_ERROR("X server disabled IRQs before releasing frame buffer.\n");
- msleep(100);
- dev_priv->flush_pending = 0;
- write_lock_irqsave(&fm->lock, irq_flags);
- drm_fence_handler(dev, fence->fence_class,
- fence->sequence, fence->type, 0);
- write_unlock_irqrestore(&fm->lock, irq_flags);
- }
-
- /*
- * Poll for sync flush completion.
- */
-
- return drm_fence_wait_polling(fence, lazy, interruptible, mask, _end);
-}
-
-static uint32_t i915_fence_needed_flush(struct drm_fence_object *fence)
-{
- uint32_t flush_flags = fence->waiting_types &
- ~(DRM_FENCE_TYPE_EXE | fence->signaled_types);
-
- if (likely(flush_flags == 0 ||
- ((flush_flags & ~fence->native_types) == 0) ||
- (fence->signaled_types != DRM_FENCE_TYPE_EXE)))
- return 0;
- else {
- struct drm_device *dev = fence->dev;
- struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
- struct drm_fence_driver *driver = dev->driver->fence_driver;
-
- if (unlikely(!dev_priv))
- return 0;
-
- if (dev_priv->flush_pending) {
- uint32_t diff = (dev_priv->flush_sequence - fence->sequence) &
- driver->sequence_mask;
-
- if (diff < driver->wrap_diff)
- return 0;
- }
- }
- return flush_flags;
-}
-
-struct drm_fence_driver i915_fence_driver = {
- .num_classes = 1,
- .wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
- .flush_diff = (1U << (BREADCRUMB_BITS - 2)),
- .sequence_mask = BREADCRUMB_MASK,
- .has_irq = NULL,
- .emit = i915_fence_emit_sequence,
- .flush = i915_fence_flush,
- .poll = i915_fence_poll,
- .needed_flush = i915_fence_needed_flush,
- .wait = i915_fence_wait,
-};
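
i915_fence_needed_flush() above compares breadcrumb sequence numbers with wrap-around ("serial number") arithmetic: the masked difference between two sequences is "in the past" when it is smaller than wrap_diff, half the sequence space. A small self-contained sketch of the comparison, with the 31-bit breadcrumb width assumed from the era's i915_drv.h:

    #include <stdbool.h>
    #include <stdint.h>

    #define BREADCRUMB_BITS 31                       /* assumed driver value */
    #define SEQ_MASK  ((1U << BREADCRUMB_BITS) - 1)  /* BREADCRUMB_MASK */
    #define WRAP_DIFF (1U << (BREADCRUMB_BITS - 1))  /* half the space */

    /* True if sequence a was emitted at or after sequence b, modulo wrap. */
    static bool seq_after_eq(uint32_t a, uint32_t b)
    {
        return ((a - b) & SEQ_MASK) < WRAP_DIFF;
    }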
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
deleted file mode 100644
index 35dc5bd7..00000000
--- a/linux-core/i915_gem.c
+++ /dev/null
@@ -1,2502 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "drm_compat.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-#include <linux/swap.h>
-
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size,
- uint32_t read_domains,
- uint32_t write_domain);
-int
-i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain);
-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
-
-int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_init *args = data;
-
- mutex_lock(&dev->struct_mutex);
-
- if (args->gtt_start >= args->gtt_end ||
- (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
- (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
- args->gtt_end - args->gtt_start);
-
- dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Creates a new mm object and returns a handle to it.
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_create *args = data;
- struct drm_gem_object *obj;
- int handle, ret;
-
- args->size = roundup(args->size, PAGE_SIZE);
-
- /* Allocate the new object */
- obj = drm_gem_object_alloc(dev, args->size);
- if (obj == NULL)
- return -ENOMEM;
-
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- if (ret)
- return ret;
-
- args->handle = handle;
-
- return 0;
-}
-
-/**
- * Reads data from the object referenced by handle.
- *
- * On error, the contents of *data are undefined.
- */
-int
-i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_pread *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- ssize_t read;
- loff_t offset;
- int ret;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
- obj_priv = obj->driver_private;
-
- /* Bounds check source.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
- return -EINVAL;
- }
-
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
- I915_GEM_DOMAIN_CPU, 0);
-	if (ret != 0) {
-		drm_gem_object_unreference(obj);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-
- offset = args->offset;
-
- read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
- args->size, &offset);
- if (read != args->size) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (read < 0)
- return read;
- else
- return -EINVAL;
- }
-
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static int
-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- ssize_t remain;
- loff_t offset;
- char __user *user_data;
- char *vaddr;
- int i, o, l;
- int ret = 0;
- unsigned long pfn;
- unsigned long unwritten;
-
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- remain = args->size;
- if (!access_ok(VERIFY_READ, user_data, remain))
- return -EFAULT;
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- ret = i915_gem_set_domain(obj, file_priv,
- I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
- if (ret)
- goto fail;
-
- obj_priv = obj->driver_private;
- offset = obj_priv->gtt_offset + args->offset;
- obj_priv->dirty = 1;
-
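-	/* The copies below go through the GTT aperture, which is
-	 * mapped at dev->agp->base; offset is already relative to the
-	 * start of the aperture (gtt_offset + args->offset).
-	 */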
- while (remain > 0) {
- /* Operation in this page
- *
- * i = page number
- * o = offset within page
- * l = bytes to copy
- */
- i = offset >> PAGE_SHIFT;
- o = offset & (PAGE_SIZE-1);
- l = remain;
- if ((o + l) > PAGE_SIZE)
- l = PAGE_SIZE - o;
-
- pfn = (dev->agp->base >> PAGE_SHIFT) + i;
-
-#ifdef DRM_KMAP_ATOMIC_PROT_PFN
- /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
- */
- vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
- __pgprot(__PAGE_KERNEL));
-#if WATCH_PWRITE
- DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
- i, o, l, pfn, vaddr);
-#endif
- unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
- user_data, l);
- kunmap_atomic(vaddr, KM_USER0);
-
- if (unwritten)
-#endif
- {
- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-#if WATCH_PWRITE
- DRM_INFO("pwrite slow i %d o %d l %d "
- "pfn %ld vaddr %p\n",
- i, o, l, pfn, vaddr);
-#endif
- if (vaddr == NULL) {
- ret = -EFAULT;
- goto fail;
- }
- unwritten = __copy_from_user(vaddr + o, user_data, l);
-#if WATCH_PWRITE
- DRM_INFO("unwritten %ld\n", unwritten);
-#endif
- iounmap(vaddr);
- if (unwritten) {
- ret = -EFAULT;
- goto fail;
- }
- }
-
- remain -= l;
- user_data += l;
- offset += l;
- }
-#if WATCH_PWRITE && 1
- i915_gem_clflush_object(obj);
- i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
- i915_gem_clflush_object(obj);
-#endif
-
-fail:
- i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
- struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
-{
- int ret;
- loff_t offset;
- ssize_t written;
-
- mutex_lock(&dev->struct_mutex);
-
- ret = i915_gem_set_domain(obj, file_priv,
- I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- offset = args->offset;
-
- written = vfs_write(obj->filp,
- (char __user *)(uintptr_t) args->data_ptr,
- args->size, &offset);
- if (written != args->size) {
- mutex_unlock(&dev->struct_mutex);
- if (written < 0)
- return written;
- else
- return -EINVAL;
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
-int
-i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
- obj_priv = obj->driver_private;
-
- /* Bounds check destination.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
- return -EINVAL;
- }
-
- /* We can only do the GTT pwrite on untiled buffers, as otherwise
- * it would end up going through the fenced access, and we'll get
- * different detiling behavior between reading and writing.
- * pread/pwrite currently are reading and writing from the CPU
- * perspective, requiring manual detiling by the client.
- */
- if (obj_priv->tiling_mode == I915_TILING_NONE &&
- dev->gtt_total != 0)
- ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
- else
- ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
-
-#if WATCH_PWRITE
- if (ret)
- DRM_INFO("pwrite failed %d\n", ret);
-#endif
-
- drm_gem_object_unreference(obj);
-
- return ret;
-}
-
-/**
- * Called when user space prepares to use an object
- */
-int
-i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_set_domain *args = data;
- struct drm_gem_object *obj;
- int ret;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
-
- mutex_lock(&dev->struct_mutex);
-#if WATCH_BUF
- DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
- obj, obj->size, args->read_domains, args->write_domain);
-#endif
- ret = i915_gem_set_domain(obj, file_priv,
- args->read_domains, args->write_domain);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/**
- * Called when user space has done writes to this buffer
- */
-int
-i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_sw_finish *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- mutex_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -EBADF;
- }
-
-#if WATCH_BUF
- DRM_INFO("%s: sw_finish %d (%p %d)\n",
- __func__, args->handle, obj, obj->size);
-#endif
- obj_priv = obj->driver_private;
-
- /* Pinned buffers may be scanout, so flush the cache */
- if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- }
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-/**
- * Maps the contents of an object, returning the address it is mapped
- * into.
- *
- * While the mapping holds a reference on the contents of the object, it doesn't
- * imply a ref on the object itself.
- */
-int
-i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_mmap *args = data;
- struct drm_gem_object *obj;
- loff_t offset;
- unsigned long addr;
-
- if (!(dev->driver->driver_features & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EBADF;
-
- offset = args->offset;
-
- down_write(&current->mm->mmap_sem);
- addr = do_mmap(obj->filp, 0, args->size,
- PROT_READ | PROT_WRITE, MAP_SHARED,
- args->offset);
- up_write(&current->mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- if (IS_ERR((void *)addr))
- return addr;
-
- args->addr_ptr = (uint64_t) addr;
-
- return 0;
-}
-
-static void
-i915_gem_object_free_page_list(struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page_count = obj->size / PAGE_SIZE;
- int i;
-
- if (obj_priv->page_list == NULL)
- return;
-
- for (i = 0; i < page_count; i++)
- if (obj_priv->page_list[i] != NULL) {
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->page_list[i]);
- mark_page_accessed(obj_priv->page_list[i]);
- page_cache_release(obj_priv->page_list[i]);
- }
- obj_priv->dirty = 0;
-
- drm_free(obj_priv->page_list,
- page_count * sizeof(struct page *),
- DRM_MEM_DRIVER);
- obj_priv->page_list = NULL;
-}
-
-static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- /* Add a reference if we're newly entering the active list. */
- if (!obj_priv->active) {
- drm_gem_object_reference(obj);
- obj_priv->active = 1;
- }
- /* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.active_list);
-}
-
-static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
- if (obj_priv->pin_count != 0)
- list_del_init(&obj_priv->list);
- else
- list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
-
- if (obj_priv->active) {
- obj_priv->active = 0;
- drm_gem_object_unreference(obj);
- }
- i915_verify_inactive(dev, __FILE__, __LINE__);
-}
-
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_mutex held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static uint32_t
-i915_add_request(struct drm_device *dev, uint32_t flush_domains)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_request *request;
- uint32_t seqno;
- int was_empty;
- RING_LOCALS;
-
- request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
- if (request == NULL)
- return 0;
-
- /* Grab the seqno we're going to make this request be, and bump the
- * next (skipping 0 so it can be the reserved no-seqno value).
- */
- seqno = dev_priv->mm.next_gem_seqno;
- dev_priv->mm.next_gem_seqno++;
- if (dev_priv->mm.next_gem_seqno == 0)
- dev_priv->mm.next_gem_seqno++;
-
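-	/* Write the seqno into the hardware status page with
-	 * MI_STORE_DWORD_INDEX, then raise MI_USER_INTERRUPT so that
-	 * waiters sleeping in i915_wait_request get woken.
-	 */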
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
-
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- DRM_DEBUG("%d\n", seqno);
-
- request->seqno = seqno;
- request->emitted_jiffies = jiffies;
- request->flush_domains = flush_domains;
- was_empty = list_empty(&dev_priv->mm.request_list);
- list_add_tail(&request->list, &dev_priv->mm.request_list);
-
- if (was_empty)
- schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
- return seqno;
-}
-
-/**
- * Command execution barrier
- *
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
-uint32_t
-i915_retire_commands(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- uint32_t flush_domains = 0;
- RING_LOCALS;
-
- /* The sampler always gets flushed on i965 (sigh) */
- if (IS_I965G(dev))
- flush_domains |= I915_GEM_DOMAIN_SAMPLER;
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(0); /* noop */
- ADVANCE_LP_RING();
- return flush_domains;
-}
-
-/**
- * Moves buffers associated only with the given active seqno from the active
- * to inactive list, potentially freeing them.
- */
-static void
-i915_gem_retire_request(struct drm_device *dev,
- struct drm_i915_gem_request *request)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (request->flush_domains != 0) {
- struct drm_i915_gem_object *obj_priv, *next;
-
- /* First clear any buffers that were only waiting for a flush
- * matching the one just retired.
- */
-
- list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
- struct drm_gem_object *obj = obj_priv->obj;
-
- if (obj->write_domain & request->flush_domains) {
- obj->write_domain = 0;
- i915_gem_object_move_to_inactive(obj);
- }
- }
-
- }
-
- /* Move any buffers on the active list that are no longer referenced
- * by the ringbuffer to the flushing/inactive lists as appropriate.
- */
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
-
- /* If the seqno being retired doesn't match the oldest in the
- * list, then the oldest in the list must still be newer than
- * this seqno.
- */
- if (obj_priv->last_rendering_seqno != request->seqno)
- return;
-#if WATCH_LRU
- DRM_INFO("%s: retire %d moves to inactive list %p\n",
- __func__, request->seqno, obj);
-#endif
-
- /* If this request flushes the write domain,
- * clear the write domain from the object now
- */
- if (request->flush_domains & obj->write_domain)
- obj->write_domain = 0;
-
- if (obj->write_domain != 0) {
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.flushing_list);
- } else {
- i915_gem_object_move_to_inactive(obj);
- }
- }
-}
-
-/**
- * Returns true if seq1 is later than seq2.
- */
-static int
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
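-	/* The signed 32-bit subtraction handles wraparound: seq1 is
-	 * considered to have passed seq2 whenever the two are less
-	 * than 2^31 apart, e.g. (0x00000001, 0xffffffff) passes.
-	 */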
- return (int32_t)(seq1 - seq2) >= 0;
-}
-
-uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
-}
-
-/**
- * This function clears the request list as sequence numbers are passed.
- */
-void
-i915_gem_retire_requests(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno;
-
- seqno = i915_get_gem_seqno(dev);
-
- while (!list_empty(&dev_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
- uint32_t retiring_seqno;
-
- request = list_first_entry(&dev_priv->mm.request_list,
- struct drm_i915_gem_request,
- list);
- retiring_seqno = request->seqno;
-
- if (i915_seqno_passed(seqno, retiring_seqno) ||
- dev_priv->mm.wedged) {
- i915_gem_retire_request(dev, request);
-
- list_del(&request->list);
- drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
- } else
- break;
- }
-}
-
-void
-i915_gem_retire_work_handler(struct work_struct *work)
-{
- drm_i915_private_t *dev_priv;
- struct drm_device *dev;
-
- dev_priv = container_of(work, drm_i915_private_t,
- mm.retire_work.work);
- dev = dev_priv->dev;
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev);
- if (!list_empty(&dev_priv->mm.request_list))
- schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret = 0;
-
- BUG_ON(seqno == 0);
-
- if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- dev_priv->mm.waiting_gem_seqno = seqno;
- i915_user_irq_on(dev_priv);
- ret = wait_event_interruptible(dev_priv->irq_queue,
- i915_seqno_passed(i915_get_gem_seqno(dev),
- seqno) ||
- dev_priv->mm.wedged);
- i915_user_irq_off(dev_priv);
- dev_priv->mm.waiting_gem_seqno = 0;
- }
- if (dev_priv->mm.wedged)
- ret = -EIO;
-
- if (ret && ret != -ERESTARTSYS)
- DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
- __func__, ret, seqno, i915_get_gem_seqno(dev));
-
- /* Directly dispatch request retiring. While we have the work queue
- * to handle this, the waiter on a request often wants an associated
- * buffer to have made it to the inactive list, and we would need
- * a separate wait queue to handle that.
- */
- if (ret == 0)
- i915_gem_retire_requests(dev);
-
- return ret;
-}
-
-static void
-i915_gem_flush(struct drm_device *dev,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t cmd;
- RING_LOCALS;
-
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
- invalidate_domains, flush_domains);
-#endif
-
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- drm_agp_chipset_flush(dev);
-
- if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) {
- /*
- * read/write caches:
- *
- * I915_GEM_DOMAIN_RENDER is always invalidated, but is
- * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
- * also flushed at 2d versus 3d pipeline switches.
- *
- * read-only caches:
- *
- * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
- * MI_READ_FLUSH is set, and is always flushed on 965.
- *
- * I915_GEM_DOMAIN_COMMAND may not exist?
- *
- * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
- * invalidated when MI_EXE_FLUSH is set.
- *
- * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
- * invalidated with every MI_FLUSH.
- *
- * TLBs:
- *
- * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
-	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
- * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
- * are flushed at any MI_FLUSH.
- */
-
- cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
- if ((invalidate_domains|flush_domains) &
- I915_GEM_DOMAIN_RENDER)
- cmd &= ~MI_NO_WRITE_FLUSH;
- if (!IS_I965G(dev)) {
- /*
- * On the 965, the sampler cache always gets flushed
- * and this bit is reserved.
- */
- if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
- cmd |= MI_READ_FLUSH;
- }
- if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
- cmd |= MI_EXE_FLUSH;
-
-#if WATCH_EXEC
- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
- BEGIN_LP_RING(2);
- OUT_RING(cmd);
- OUT_RING(0); /* noop */
- ADVANCE_LP_RING();
- }
-}
-
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- */
-static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret;
- uint32_t write_domain;
-
- /* If there are writes queued to the buffer, flush and
- * create a new seqno to wait for.
- */
- write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT);
- if (write_domain) {
-#if WATCH_BUF
- DRM_INFO("%s: flushing object %p from write domain %08x\n",
- __func__, obj, write_domain);
-#endif
- i915_gem_flush(dev, 0, write_domain);
-
- i915_gem_object_move_to_active(obj);
- obj_priv->last_rendering_seqno = i915_add_request(dev,
- write_domain);
- BUG_ON(obj_priv->last_rendering_seqno == 0);
-#if WATCH_LRU
- DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
-#endif
- }
-
- /* If there is rendering queued on the buffer being evicted, wait for
- * it.
- */
- if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
- if (ret != 0)
- return ret;
- }
-
- return 0;
-}
-
-/**
- * Unbinds an object from the GTT aperture.
- */
-static int
-i915_gem_object_unbind(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret = 0;
-
-#if WATCH_BUF
- DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
- DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
-#endif
- if (obj_priv->gtt_space == NULL)
- return 0;
-
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Attempting to unbind pinned buffer\n");
- return -EINVAL;
- }
-
- /* Wait for any rendering to complete
- */
- ret = i915_gem_object_wait_rendering(obj);
- if (ret) {
- DRM_ERROR("wait_rendering failed: %d\n", ret);
- return ret;
- }
-
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it. This will
- * also ensure that all pending GPU writes are finished
- * before we unbind.
- */
- ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
- I915_GEM_DOMAIN_CPU);
- if (ret) {
- DRM_ERROR("set_domain failed: %d\n", ret);
- return ret;
- }
-
- if (obj_priv->agp_mem != NULL) {
- drm_unbind_agp(obj_priv->agp_mem);
- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
- obj_priv->agp_mem = NULL;
- }
-
- BUG_ON(obj_priv->active);
-
- i915_gem_object_free_page_list(obj);
-
- if (obj_priv->gtt_space) {
- atomic_dec(&dev->gtt_count);
- atomic_sub(obj->size, &dev->gtt_memory);
-
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- }
-
- /* Remove ourselves from the LRU list if present. */
- if (!list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
-
- return 0;
-}
-
-static int
-i915_gem_evict_something(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret = 0;
-
- for (;;) {
- /* If there's an inactive buffer available now, grab it
- * and be done.
- */
- if (!list_empty(&dev_priv->mm.inactive_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
- BUG_ON(obj_priv->pin_count != 0);
-#if WATCH_LRU
- DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
- BUG_ON(obj_priv->active);
-
- /* Wait on the rendering and unbind the buffer. */
- ret = i915_gem_object_unbind(obj);
- break;
- }
-
- /* If we didn't get anything, but the ring is still processing
- * things, wait for one of those things to finish and hopefully
- * leave us a buffer to evict.
- */
- if (!list_empty(&dev_priv->mm.request_list)) {
- struct drm_i915_gem_request *request;
-
- request = list_first_entry(&dev_priv->mm.request_list,
- struct drm_i915_gem_request,
- list);
-
- ret = i915_wait_request(dev, request->seqno);
- if (ret)
- break;
-
- /* if waiting caused an object to become inactive,
-			 * then loop around and evict it. Otherwise, we
- * assume that waiting freed and unbound something,
- * so there should now be some space in the GTT
- */
- if (!list_empty(&dev_priv->mm.inactive_list))
- continue;
- break;
- }
-
- /* If we didn't have anything on the request list but there
- * are buffers awaiting a flush, emit one and try again.
- * When we wait on it, those buffers waiting for that flush
- * will get moved to inactive.
- */
- if (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
-
- i915_gem_flush(dev,
- obj->write_domain,
- obj->write_domain);
- i915_add_request(dev, obj->write_domain);
-
- obj = NULL;
- continue;
- }
-
- DRM_ERROR("inactive empty %d request empty %d "
- "flushing empty %d\n",
- list_empty(&dev_priv->mm.inactive_list),
- list_empty(&dev_priv->mm.request_list),
- list_empty(&dev_priv->mm.flushing_list));
- /* If we didn't do any of the above, there's nothing to be done
- * and we just can't fit it in.
- */
- return -ENOMEM;
- }
- return ret;
-}
-
-static int
-i915_gem_object_get_page_list(struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page_count, i;
- struct address_space *mapping;
- struct inode *inode;
- struct page *page;
- int ret;
-
- if (obj_priv->page_list)
- return 0;
-
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
- page_count = obj->size / PAGE_SIZE;
- BUG_ON(obj_priv->page_list != NULL);
- obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
- DRM_MEM_DRIVER);
- if (obj_priv->page_list == NULL) {
-		DRM_ERROR("Failed to allocate page list\n");
- return -ENOMEM;
- }
-
- inode = obj->filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
- for (i = 0; i < page_count; i++) {
- page = read_mapping_page(mapping, i, NULL);
- if (IS_ERR(page)) {
- ret = PTR_ERR(page);
- DRM_ERROR("read_mapping_page failed: %d\n", ret);
- i915_gem_object_free_page_list(obj);
- return ret;
- }
- obj_priv->page_list[i] = page;
- }
- return 0;
-}
-
-/**
- * Finds free space in the GTT aperture and binds the object there.
- */
-static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct drm_mm_node *free_space;
- int page_count, ret;
-
- if (alignment == 0)
- alignment = PAGE_SIZE;
- if (alignment & (PAGE_SIZE - 1)) {
- DRM_ERROR("Invalid object alignment requested %u\n", alignment);
- return -EINVAL;
- }
-
- search_free:
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- obj->size, alignment, 0);
- if (free_space != NULL) {
- obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
- alignment);
- if (obj_priv->gtt_space != NULL) {
- obj_priv->gtt_space->private = obj;
- obj_priv->gtt_offset = obj_priv->gtt_space->start;
- }
- }
- if (obj_priv->gtt_space == NULL) {
- /* If the gtt is empty and we're still having trouble
- * fitting our object in, we're out of memory.
- */
-#if WATCH_LRU
- DRM_INFO("%s: GTT full, evicting something\n", __func__);
-#endif
- if (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list)) {
- DRM_ERROR("GTT full, but LRU list empty\n");
- return -ENOMEM;
- }
-
- ret = i915_gem_evict_something(dev);
- if (ret != 0) {
- DRM_ERROR("Failed to evict a buffer %d\n", ret);
- return ret;
- }
- goto search_free;
- }
-
-#if WATCH_BUF
- DRM_INFO("Binding object of size %d at 0x%08x\n",
- obj->size, obj_priv->gtt_offset);
-#endif
- ret = i915_gem_object_get_page_list(obj);
- if (ret) {
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- return ret;
- }
-
- page_count = obj->size / PAGE_SIZE;
- /* Create an AGP memory structure pointing at our pages, and bind it
- * into the GTT.
- */
- obj_priv->agp_mem = drm_agp_bind_pages(dev,
- obj_priv->page_list,
- page_count,
- obj_priv->gtt_offset);
- if (obj_priv->agp_mem == NULL) {
- i915_gem_object_free_page_list(obj);
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- return -ENOMEM;
- }
- atomic_inc(&dev->gtt_count);
- atomic_add(obj->size, &dev->gtt_memory);
-
- /* Assert that the object is not currently in any GPU domain. As it
- * wasn't in the GTT, there shouldn't be any way it could have been in
- * a GPU cache
- */
- BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
-
- return 0;
-}
-
-void
-i915_gem_clflush_object(struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- /* If we don't have a page list set up, then we're not pinned
- * to GPU, and we can ignore the cache flush because it'll happen
- * again at bind time.
- */
- if (obj_priv->page_list == NULL)
- return;
-
- drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
-}
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invalidating though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- *	3. Mapped to GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- *	6. Read/written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static int
-i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- uint32_t invalidate_domains = 0;
- uint32_t flush_domains = 0;
- int ret;
-
-#if WATCH_BUF
- DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
- __func__, obj,
- obj->read_domains, read_domains,
- obj->write_domain, write_domain);
-#endif
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (write_domain == 0)
- read_domains |= obj->read_domains;
- else
- obj_priv->dirty = 1;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->write_domain && obj->write_domain != read_domains) {
- flush_domains |= obj->write_domain;
- invalidate_domains |= read_domains & ~obj->write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
-#if WATCH_BUF
- DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
- __func__, flush_domains, invalidate_domains);
-#endif
- /*
-		 * If we're invalidating the CPU cache and flushing a GPU cache,
-		 * then pause for rendering so that the GPU caches will be
-		 * flushed before the CPU cache is invalidated.
- */
- if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
- (flush_domains & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT))) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
- i915_gem_clflush_object(obj);
- }
-
- if ((write_domain | flush_domains) != 0)
- obj->write_domain = write_domain;
-
- /* If we're invalidating the CPU domain, clear the per-page CPU
- * domain list as well.
- */
- if (obj_priv->page_cpu_valid != NULL &&
- (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
- ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
- memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
- }
- obj->read_domains = read_domains;
-
- dev->invalidate_domains |= invalidate_domains;
- dev->flush_domains |= flush_domains;
-#if WATCH_BUF
- DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
- __func__,
- obj->read_domains, obj->write_domain,
- dev->invalidate_domains, dev->flush_domains);
-#endif
- return 0;
-}
-
-/**
- * Set the read/write domain on a range of the object.
- *
- * Currently only implemented for CPU reads, otherwise drops to normal
- * i915_gem_object_set_domain().
- */
-static int
-i915_gem_object_set_domain_range(struct drm_gem_object *obj,
- uint64_t offset,
- uint64_t size,
- uint32_t read_domains,
- uint32_t write_domain)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret, i;
-
- if (obj->read_domains & I915_GEM_DOMAIN_CPU)
- return 0;
-
- if (read_domains != I915_GEM_DOMAIN_CPU ||
- write_domain != 0)
- return i915_gem_object_set_domain(obj,
- read_domains, write_domain);
-
- /* Wait on any GPU rendering to the object to be flushed. */
- if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
- ret = i915_gem_object_wait_rendering(obj);
- if (ret)
- return ret;
- }
-
- if (obj_priv->page_cpu_valid == NULL) {
- obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
- DRM_MEM_DRIVER);
- }
-
- /* Flush the cache on any pages that are still invalid from the CPU's
- * perspective.
- */
-	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
- if (obj_priv->page_cpu_valid[i])
- continue;
-
- drm_ttm_cache_flush(obj_priv->page_list + i, 1);
-
- obj_priv->page_cpu_valid[i] = 1;
- }
-
- return 0;
-}
-
-/**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
- uint32_t flush_domains = dev->flush_domains;
-
- /*
- * Now that all the buffers are synced to the proper domains,
- * flush and invalidate the collected domains
- */
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev,
- dev->invalidate_domains,
- dev->flush_domains);
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- }
-
- return flush_domains;
-}
-
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
-static int
-i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_relocation_entry reloc;
- struct drm_i915_gem_relocation_entry __user *relocs;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int i, ret;
- uint32_t last_reloc_offset = -1;
- void *reloc_page = NULL;
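-	/* last_reloc_offset/reloc_page cache the most recently mapped
-	 * GTT page, so consecutive relocations landing in the same
-	 * page share a single ioremap.
-	 */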
-
- /* Choose the GTT offset for our buffer and put it there. */
- ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
- if (ret)
- return ret;
-
- entry->offset = obj_priv->gtt_offset;
-
- relocs = (struct drm_i915_gem_relocation_entry __user *)
- (uintptr_t) entry->relocs_ptr;
- /* Apply the relocations, using the GTT aperture to avoid cache
- * flushing requirements.
- */
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_gem_object *target_obj;
- struct drm_i915_gem_object *target_obj_priv;
- uint32_t reloc_val, reloc_offset, *reloc_entry;
- int ret;
-
- ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
- if (ret != 0) {
- i915_gem_object_unpin(obj);
- return ret;
- }
-
- target_obj = drm_gem_object_lookup(obj->dev, file_priv,
- reloc.target_handle);
- if (target_obj == NULL) {
- i915_gem_object_unpin(obj);
- return -EBADF;
- }
- target_obj_priv = target_obj->driver_private;
-
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_obj_priv->gtt_space == NULL) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc.target_handle);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
- if (reloc.offset > obj->size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset, (int) obj->size);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
- if (reloc.offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
- if (reloc.write_domain && target_obj->pending_write_domain &&
- reloc.write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.write_domain,
- target_obj->pending_write_domain);
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -EINVAL;
- }
-
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc.offset,
- (int) reloc.target_handle,
- (int) reloc.read_domains,
- (int) reloc.write_domain,
- (int) target_obj_priv->gtt_offset,
- (int) reloc.presumed_offset,
- reloc.delta);
-#endif
-
- target_obj->pending_read_domains |= reloc.read_domains;
- target_obj->pending_write_domain |= reloc.write_domain;
-
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
- drm_gem_object_unreference(target_obj);
- continue;
- }
-
- /* Now that we're going to actually write some data in,
- * make sure that any rendering using this buffer's contents
- * is completed.
- */
- i915_gem_object_wait_rendering(obj);
-
- /* As we're writing through the gtt, flush
- * any CPU writes before we write the relocations
- */
- if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
- }
-
- /* Map the page containing the relocation we're going to
- * perform.
- */
- reloc_offset = obj_priv->gtt_offset + reloc.offset;
- if (reloc_page == NULL ||
- (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
- (reloc_offset & ~(PAGE_SIZE - 1))) {
- if (reloc_page != NULL)
- iounmap(reloc_page);
-
- reloc_page = ioremap(dev->agp->base +
- (reloc_offset & ~(PAGE_SIZE - 1)),
- PAGE_SIZE);
- last_reloc_offset = reloc_offset;
- if (reloc_page == NULL) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return -ENOMEM;
- }
- }
-
- reloc_entry = (uint32_t *)((char *)reloc_page +
- (reloc_offset & (PAGE_SIZE - 1)));
- reloc_val = target_obj_priv->gtt_offset + reloc.delta;
-
-#if WATCH_BUF
- DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
- obj, (unsigned int) reloc.offset,
- readl(reloc_entry), reloc_val);
-#endif
- writel(reloc_val, reloc_entry);
-
- /* Write the updated presumed offset for this entry back out
- * to the user.
- */
- reloc.presumed_offset = target_obj_priv->gtt_offset;
- ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
- if (ret != 0) {
- drm_gem_object_unreference(target_obj);
- i915_gem_object_unpin(obj);
- return ret;
- }
-
- drm_gem_object_unreference(target_obj);
- }
-
- if (reloc_page != NULL)
- iounmap(reloc_page);
-
-#if WATCH_BUF
- if (0)
- i915_gem_dump_object(obj, 128, __func__, ~0);
-#endif
- return 0;
-}
-
-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer *exec,
- uint64_t exec_offset)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
- (uintptr_t) exec->cliprects_ptr;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint32_t exec_start, exec_len;
- RING_LOCALS;
-
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
-
- if ((exec_start | exec_len) & 0x7) {
- DRM_ERROR("alignment\n");
- return -EINVAL;
- }
-
- if (!exec_start)
- return -EINVAL;
-
- count = nbox ? nbox : 1;
-
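-	/* With no cliprects the batch is dispatched once; otherwise
-	 * once per box, each preceded by an emitted drawing rectangle.
-	 */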
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, boxes, i,
- exec->DR1, exec->DR4);
- if (ret)
- return ret;
- }
-
- if (IS_I830(dev) || IS_845G(dev)) {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- OUT_RING(exec_start + exec_len - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(2);
- if (IS_I965G(dev)) {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6) |
- MI_BATCH_NON_SECURE_I965);
- OUT_RING(exec_start);
- } else {
- OUT_RING(MI_BATCH_BUFFER_START |
- (2 << 6));
- OUT_RING(exec_start | MI_BATCH_NON_SECURE);
- }
- ADVANCE_LP_RING();
- }
- }
-
- /* XXX breadcrumb */
- return 0;
-}
-
-/* Throttle our rendering by waiting until the ring has completed our requests
- * emitted over 20 msec ago.
- *
- * This should get us reasonable parallelism between CPU and GPU but also
- * relatively low latency when blocking on a particular request to finish.
- */
-static int
-i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
- int ret = 0;
- uint32_t seqno;
-
- mutex_lock(&dev->struct_mutex);
- seqno = i915_file_priv->mm.last_gem_throttle_seqno;
- i915_file_priv->mm.last_gem_throttle_seqno =
- i915_file_priv->mm.last_gem_seqno;
- if (seqno)
- ret = i915_wait_request(dev, seqno);
- mutex_unlock(&dev->struct_mutex);
- return ret;
-}
-
-int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_exec_object *exec_list = NULL;
- struct drm_gem_object **object_list = NULL;
- struct drm_gem_object *batch_obj;
- int ret, i, pinned = 0;
- uint64_t exec_offset;
- uint32_t seqno, flush_domains;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- /* Copy in the exec list from userland */
- exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
- DRM_MEM_DRIVER);
- object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
- DRM_MEM_DRIVER);
- if (exec_list == NULL || object_list == NULL) {
- DRM_ERROR("Failed to allocate exec or object list "
- "for %d buffers\n",
- args->buffer_count);
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- goto pre_mutex_err;
- }
-
- mutex_lock(&dev->struct_mutex);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- if (dev_priv->mm.wedged) {
- DRM_ERROR("Execbuf while wedged\n");
- mutex_unlock(&dev->struct_mutex);
- return -EIO;
- }
-
- if (dev_priv->mm.suspended) {
- DRM_ERROR("Execbuf while VT-switched.\n");
- mutex_unlock(&dev->struct_mutex);
- return -EBUSY;
- }
-
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as each object is bound to the
-	 * GTT.
-	 */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
-
- /* Look up object handles and perform the relocations */
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(dev, file_priv,
- exec_list[i].handle);
- if (object_list[i] == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
- exec_list[i].handle, i);
- ret = -EBADF;
- goto err;
- }
-
- object_list[i]->pending_read_domains = 0;
- object_list[i]->pending_write_domain = 0;
- ret = i915_gem_object_pin_and_relocate(object_list[i],
- file_priv,
- &exec_list[i]);
- if (ret) {
- DRM_ERROR("object bind and relocate failed %d\n", ret);
- goto err;
- }
- pinned = i + 1;
- }
-
- /* Set the pending read domains for the batch buffer to COMMAND */
- batch_obj = object_list[args->buffer_count-1];
- batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
- batch_obj->pending_write_domain = 0;
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- if (obj_priv->gtt_space == NULL) {
-			/* We evicted the buffer in the process of
-			 * validating our set of buffers. We could try
-			 * to recover by kicking everything out and
-			 * trying again from the start.
-			 */
- ret = -ENOMEM;
- goto err;
- }
-
- /* make sure all previous memory operations have passed */
- ret = i915_gem_object_set_domain(obj,
- obj->pending_read_domains,
- obj->pending_write_domain);
- if (ret)
- goto err;
- }
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- /* Flush/invalidate caches and chipset buffer */
- flush_domains = i915_gem_dev_set_domain(dev);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
-#if WATCH_COHERENCY
- for (i = 0; i < args->buffer_count; i++) {
- i915_gem_object_check_coherency(object_list[i],
- exec_list[i].handle);
- }
-#endif
-
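-	/* By convention the batch buffer is the last object in the
-	 * exec list, so its GTT offset is where execution starts.
-	 */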
- exec_offset = exec_list[args->buffer_count - 1].offset;
-
-#if WATCH_EXEC
- i915_gem_dump_object(object_list[args->buffer_count - 1],
- args->batch_len,
- __func__,
- ~0);
-#endif
-
- /* Exec the batchbuffer */
- ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
- if (ret) {
- DRM_ERROR("dispatch failed %d\n", ret);
- goto err;
- }
-
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires
- */
- flush_domains |= i915_retire_commands(dev);
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- /*
- * Get a seqno representing the execution of the current buffer,
- * which we can wait on. We would like to mitigate these interrupts,
- * likely by only creating seqnos occasionally (so that we have
- * *some* interrupts representing completion of buffers that we can
- * wait on when trying to clear up gtt space).
- */
- seqno = i915_add_request(dev, flush_domains);
- BUG_ON(seqno == 0);
- i915_file_priv->mm.last_gem_seqno = seqno;
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- i915_gem_object_move_to_active(obj);
- obj_priv->last_rendering_seqno = seqno;
-#if WATCH_LRU
- DRM_INFO("%s: move to exec list %p\n", __func__, obj);
-#endif
- }
-#if WATCH_LRU
- i915_dump_lru(dev, __func__);
-#endif
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- /* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret)
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
-err:
- if (object_list != NULL) {
- for (i = 0; i < pinned; i++)
- i915_gem_object_unpin(object_list[i]);
-
- for (i = 0; i < args->buffer_count; i++)
- drm_gem_object_unreference(object_list[i]);
- }
- mutex_unlock(&dev->struct_mutex);
-
-pre_mutex_err:
- drm_free(object_list, sizeof(*object_list) * args->buffer_count,
- DRM_MEM_DRIVER);
- drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
- DRM_MEM_DRIVER);
-
- return ret;
-}
-
-int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int ret;
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
- if (obj_priv->gtt_space == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, alignment);
- if (ret != 0) {
-			DRM_ERROR("Failure to bind: %d\n", ret);
- return ret;
- }
- }
- obj_priv->pin_count++;
-
- /* If the object is not active and not pending a flush,
- * remove it from the inactive list
- */
- if (obj_priv->pin_count == 1) {
- atomic_inc(&dev->pin_count);
- atomic_add(obj->size, &dev->pin_memory);
- if (!obj_priv->active &&
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) == 0 &&
- !list_empty(&obj_priv->list))
- list_del_init(&obj_priv->list);
- }
- i915_verify_inactive(dev, __FILE__, __LINE__);
-
- return 0;
-}
-
-void
-i915_gem_object_unpin(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- i915_verify_inactive(dev, __FILE__, __LINE__);
- obj_priv->pin_count--;
- BUG_ON(obj_priv->pin_count < 0);
- BUG_ON(obj_priv->gtt_space == NULL);
-
- /* If the object is no longer pinned, and is
- * neither active nor being flushed, then stick it on
- * the inactive list
- */
- if (obj_priv->pin_count == 0) {
- if (!obj_priv->active &&
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)) == 0)
- list_move_tail(&obj_priv->list,
- &dev_priv->mm.inactive_list);
- atomic_dec(&dev->pin_count);
- atomic_sub(obj->size, &dev->pin_memory);
- }
- i915_verify_inactive(dev, __FILE__, __LINE__);
-}
-
-int
-i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- mutex_lock(&dev->struct_mutex);
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
- args->handle);
- mutex_unlock(&dev->struct_mutex);
- return -EBADF;
- }
- obj_priv = obj->driver_private;
-
- ret = i915_gem_object_pin(obj, args->alignment);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
-
- /* XXX - flush the CPU caches for pinned objects
- * as the X server doesn't manage domains yet
- */
- if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- obj->write_domain = 0;
- }
- args->offset = obj_priv->gtt_offset;
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-int
-i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
-
- mutex_lock(&dev->struct_mutex);
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
- args->handle);
- mutex_unlock(&dev->struct_mutex);
- return -EBADF;
- }
-
- i915_gem_object_unpin(obj);
-
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-int
-i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_busy *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- mutex_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL) {
- DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
- args->handle);
- mutex_unlock(&dev->struct_mutex);
- return -EBADF;
- }
-
- obj_priv = obj->driver_private;
- args->busy = obj_priv->active;
-
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-int
-i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return i915_gem_ring_throttle(dev, file_priv);
-}
-
-int i915_gem_init_object(struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
- if (obj_priv == NULL)
- return -ENOMEM;
-
- /*
- * We've just allocated pages from the kernel,
- * so they've just been written by the CPU with
- * zeros. They'll need to be clflushed before we
- * use them with the GPU.
- */
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
-
- obj->driver_private = obj_priv;
- obj_priv->obj = obj;
- INIT_LIST_HEAD(&obj_priv->list);
- return 0;
-}
-
-void i915_gem_free_object(struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- while (obj_priv->pin_count > 0)
- i915_gem_object_unpin(obj);
-
- i915_gem_object_unbind(obj);
-
- drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
- drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
-}
-
-int
-i915_gem_set_domain(struct drm_gem_object *obj,
- struct drm_file *file_priv,
- uint32_t read_domains,
- uint32_t write_domain)
-{
- struct drm_device *dev = obj->dev;
- int ret;
- uint32_t flush_domains;
-
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
- ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
- if (ret)
- return ret;
- flush_domains = i915_gem_dev_set_domain(obj->dev);
-
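-	/* If a GPU cache was flushed, emit a request so the flush is
-	 * tracked by a seqno that can be waited on.
-	 */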
- if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
- (void) i915_add_request(dev, flush_domains);
-
- return 0;
-}
-
-/** Unbinds all objects that are on the given buffer list. */
-static int
-i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
-{
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- while (!list_empty(head)) {
- obj_priv = list_first_entry(head,
- struct drm_i915_gem_object,
- list);
- obj = obj_priv->obj;
-
- if (obj_priv->pin_count != 0) {
- DRM_ERROR("Pinned object in unbind list\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
- ret);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int
-i915_gem_idle(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno, cur_seqno, last_seqno;
- int stuck;
-
- if (dev_priv->mm.suspended)
- return 0;
-
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- */
- dev_priv->mm.suspended = 1;
-
- i915_kernel_lost_context(dev);
-
- /* Flush the GPU along with all non-CPU write domains
- */
- i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
- ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT));
-
- if (seqno == 0) {
- mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
- }
-
- dev_priv->mm.waiting_gem_seqno = seqno;
- last_seqno = 0;
- stuck = 0;
- for (;;) {
- cur_seqno = i915_get_gem_seqno(dev);
- if (i915_seqno_passed(cur_seqno, seqno))
- break;
- if (last_seqno == cur_seqno) {
- if (stuck++ > 100) {
- DRM_ERROR("hardware wedged\n");
- dev_priv->mm.wedged = 1;
- DRM_WAKEUP(&dev_priv->irq_queue);
- break;
- }
- }
- msleep(10);
- last_seqno = cur_seqno;
- }
- dev_priv->mm.waiting_gem_seqno = 0;
-
- i915_gem_retire_requests(dev);
-
- /* Active and flushing should now be empty as we've
- * waited for a sequence higher than any pending execbuffer
- */
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
- /* The request list should now be empty as we've also waited
- * for the last request in the list
- */
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
-
- /* Move all buffers out of the GTT. */
- i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
-
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
- return 0;
-}
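A quick sanity check of the wedge heuristic above: the loop polls the hardware seqno every 10 ms and gives up after just over 100 consecutive polls without progress, so the driver declares the chip wedged after roughly one second of total stall before waking any waiters.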
-
-static int
-i915_gem_init_hws(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- /* If we need a physical address for the status page, it's already
- * initialized at driver load time.
- */
- if (!I915_NEED_GFX_HWS(dev))
- return 0;
-
- obj = drm_gem_object_alloc(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate status page\n");
- return -ENOMEM;
- }
- obj_priv = obj->driver_private;
-
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- return ret;
- }
-
- dev_priv->status_gfx_addr = obj_priv->gtt_offset;
- dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
- dev_priv->hws_map.size = 4096;
- dev_priv->hws_map.type = 0;
- dev_priv->hws_map.flags = 0;
- dev_priv->hws_map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
- DRM_ERROR("Failed to map status page.\n");
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
- drm_gem_object_unreference(obj);
- return -EINVAL;
- }
- dev_priv->hws_obj = obj;
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
-
- return 0;
-}
-
-static int
-i915_gem_init_ringbuffer(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- ret = i915_gem_init_hws(dev);
- if (ret != 0)
- return ret;
-
- obj = drm_gem_object_alloc(dev, 128 * 1024);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate ringbuffer\n");
- return -ENOMEM;
- }
- obj_priv = obj->driver_private;
-
- ret = i915_gem_object_pin(obj, 4096);
- if (ret != 0) {
- drm_gem_object_unreference(obj);
- return ret;
- }
-
- /* Set up the kernel mapping for the ring. */
- dev_priv->ring.Size = obj->size;
- dev_priv->ring.tail_mask = obj->size - 1;
-
- dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
- dev_priv->ring.map.size = obj->size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
- DRM_ERROR("Failed to map ringbuffer.\n");
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
- drm_gem_object_unreference(obj);
- return -EINVAL;
- }
- dev_priv->ring.ring_obj = obj;
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- /* Stop the ring if it's running. */
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_HEAD, 0);
- I915_WRITE(PRB0_TAIL, 0);
- I915_WRITE(PRB0_START, 0);
-
- /* Initialize the ring. */
- I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- I915_WRITE(PRB0_CTL,
- ((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID);
-
- /* Update our cache of the ring state */
- i915_kernel_lost_context(dev);
-
- return 0;
-}
-
-static void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (dev_priv->ring.ring_obj == NULL)
- return;
-
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
-
- i915_gem_object_unpin(dev_priv->ring.ring_obj);
- drm_gem_object_unreference(dev_priv->ring.ring_obj);
- dev_priv->ring.ring_obj = NULL;
- memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
-
- if (dev_priv->hws_obj != NULL) {
- i915_gem_object_unpin(dev_priv->hws_obj);
- drm_gem_object_unreference(dev_priv->hws_obj);
- dev_priv->hws_obj = NULL;
- memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-
- /* Write high address into HWS_PGA when disabling. */
- I915_WRITE(HWS_PGA, 0x1ffff000);
- }
-}
-
-int
-i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
-
- if (dev_priv->mm.wedged) {
- DRM_ERROR("Reenabling wedged hardware, good luck\n");
- dev_priv->mm.wedged = 0;
- }
-
- ret = i915_gem_init_ringbuffer(dev);
- if (ret != 0)
- return ret;
-
- mutex_lock(&dev->struct_mutex);
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->mm.request_list));
- dev_priv->mm.suspended = 0;
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-int
-i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret;
-
- mutex_lock(&dev->struct_mutex);
- ret = i915_gem_idle(dev);
- if (ret == 0)
- i915_gem_cleanup_ringbuffer(dev);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-void
-i915_gem_lastclose(struct drm_device *dev)
-{
- int ret;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
-
- if (dev_priv->ring.ring_obj != NULL) {
- ret = i915_gem_idle(dev);
- if (ret)
- DRM_ERROR("failed to idle hardware: %d\n", ret);
-
- i915_gem_cleanup_ringbuffer(dev);
- }
-
- mutex_unlock(&dev->struct_mutex);
-}
-
-void i915_gem_load(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
- INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.request_list);
- INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
- i915_gem_retire_work_handler);
- dev_priv->mm.next_gem_seqno = 1;
-
- i915_gem_detect_bit_6_swizzle(dev);
-}
diff --git a/linux-core/i915_gem_debug.c b/linux-core/i915_gem_debug.c
deleted file mode 100644
index a2d6f289..00000000
--- a/linux-core/i915_gem_debug.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Keith Packard <keithp@keithp.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "drm_compat.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = obj_priv->obj;
- if (obj_priv->pin_count || obj_priv->active ||
- (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
- I915_GEM_DOMAIN_GTT)))
- DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
- obj,
- obj_priv->pin_count, obj_priv->active,
- obj->write_domain, file, line);
- }
-}
-#endif /* WATCH_INACTIVE */
-
-
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
- uint32_t bias, uint32_t mark)
-{
- uint32_t *mem = kmap_atomic(page, KM_USER0);
- int i;
- for (i = start; i < end; i += 4)
- DRM_INFO("%08x: %08x%s\n",
- (int) (bias + i), mem[i / 4],
- (bias + i == mark) ? " ********" : "");
- kunmap_atomic(mem, KM_USER0);
- /* give syslog time to catch up */
- msleep(1);
-}
-
-void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
- const char *where, uint32_t mark)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page;
-
- DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
- for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
- int page_len, chunk, chunk_len;
-
- page_len = len - page * PAGE_SIZE;
- if (page_len > PAGE_SIZE)
- page_len = PAGE_SIZE;
-
- for (chunk = 0; chunk < page_len; chunk += 128) {
- chunk_len = page_len - chunk;
- if (chunk_len > 128)
- chunk_len = 128;
- i915_gem_dump_page(obj_priv->page_list[page],
- chunk, chunk + chunk_len,
- obj_priv->gtt_offset +
- page * PAGE_SIZE,
- mark);
- }
- }
-}
-#endif
-
-#if WATCH_LRU
-void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
-
- DRM_INFO("active list %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
- DRM_INFO("flushing list %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
- DRM_INFO("inactive %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
-}
-#endif
-
-
-#if WATCH_COHERENCY
-void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page;
- uint32_t *gtt_mapping;
- uint32_t *backing_map = NULL;
- int bad_count = 0;
-
- DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
- __func__, obj, obj_priv->gtt_offset, handle,
- obj->size / 1024);
-
- gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
- obj->size);
- if (gtt_mapping == NULL) {
- DRM_ERROR("failed to map GTT space\n");
- return;
- }
-
- for (page = 0; page < obj->size / PAGE_SIZE; page++) {
- int i;
-
- backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
-
- if (backing_map == NULL) {
- DRM_ERROR("failed to map backing page\n");
- goto out;
- }
-
- for (i = 0; i < PAGE_SIZE / 4; i++) {
- uint32_t cpuval = backing_map[i];
- uint32_t gttval = readl(gtt_mapping +
- page * 1024 + i);
-
- if (cpuval != gttval) {
- DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
- "0x%08x vs 0x%08x\n",
- (int)(obj_priv->gtt_offset +
- page * PAGE_SIZE + i * 4),
- cpuval, gttval);
- if (bad_count++ >= 8) {
- DRM_INFO("...\n");
- goto out;
- }
- }
- }
- kunmap_atomic(backing_map, KM_USER0);
- backing_map = NULL;
- }
-
- out:
- if (backing_map != NULL)
- kunmap_atomic(backing_map, KM_USER0);
- iounmap(gtt_mapping);
-
- /* give syslog time to catch up */
- msleep(1);
-
- /* Directly flush the object, since we just loaded values with the CPU
- * from the backing pages and we don't want to disturb the cache
- * management that we're trying to observe.
- */
-
- i915_gem_clflush_object(obj);
-}
-#endif
diff --git a/linux-core/i915_gem_proc.c b/linux-core/i915_gem_proc.c
deleted file mode 100644
index 132eb3d1..00000000
--- a/linux-core/i915_gem_proc.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- * Keith Packard <keithp@keithp.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "drm_compat.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-static int i915_gem_active_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Active:\n");
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
- list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->name) {
- DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
- obj, obj->name,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- } else {
- DRM_PROC_PRINT(" %p: %08x %08x %d\n",
- obj,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- }
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Flushing:\n");
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
- list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->name) {
- DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
- obj, obj->name,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- } else {
- DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- }
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Inactive:\n");
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
- list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->name) {
- DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
- obj, obj->name,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- } else {
- DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- }
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_request_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_request *gem_request;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Request:\n");
- list_for_each_entry(gem_request, &dev_priv->mm.request_list,
- list)
- {
- DRM_PROC_PRINT(" %d @ %d %08x\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies),
- gem_request->flush_domains);
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
- DRM_PROC_PRINT("Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-
-static int i915_interrupt_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Interrupt enable: %08x\n",
- I915_READ(IER));
- DRM_PROC_PRINT("Interrupt identity: %08x\n",
- I915_READ(IIR));
- DRM_PROC_PRINT("Interrupt mask: %08x\n",
- I915_READ(IMR));
- DRM_PROC_PRINT("Pipe A stat: %08x\n",
- I915_READ(PIPEASTAT));
- DRM_PROC_PRINT("Pipe B stat: %08x\n",
- I915_READ(PIPEBSTAT));
- DRM_PROC_PRINT("Interrupts received: %d\n",
- atomic_read(&dev_priv->irq_received));
- DRM_PROC_PRINT("Current sequence: %d\n",
- i915_get_gem_seqno(dev));
- DRM_PROC_PRINT("Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- DRM_PROC_PRINT("IRQ sequence: %d\n",
- dev_priv->mm.irq_gem_seqno);
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static struct drm_proc_list {
- /** file name */
- const char *name;
- /** proc callback */
- int (*f) (char *, char **, off_t, int, int *, void *);
-} i915_gem_proc_list[] = {
- {"i915_gem_active", i915_gem_active_info},
- {"i915_gem_flushing", i915_gem_flushing_info},
- {"i915_gem_inactive", i915_gem_inactive_info},
- {"i915_gem_request", i915_gem_request_info},
- {"i915_gem_seqno", i915_gem_seqno_info},
- {"i915_gem_interrupt", i915_interrupt_info},
-};
-
-#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
-
-int i915_gem_proc_init(struct drm_minor *minor)
-{
- struct proc_dir_entry *ent;
- int i, j;
-
- for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
- ent = create_proc_entry(i915_gem_proc_list[i].name,
- S_IFREG | S_IRUGO, minor->dev_root);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/.../%s\n",
- i915_gem_proc_list[i].name);
- for (j = 0; j < i; j++)
- remove_proc_entry(i915_gem_proc_list[j].name,
- minor->dev_root);
- return -1;
- }
- ent->read_proc = i915_gem_proc_list[i].f;
- ent->data = minor;
- }
- return 0;
-}
-
-void i915_gem_proc_cleanup(struct drm_minor *minor)
-{
- int i;
-
- if (!minor->dev_root)
- return;
-
- for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
- remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
-}
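All six handlers above follow the same legacy read_proc calling convention: bail out past DRM_PROC_LIMIT, point *start at the requested offset, accumulate output with DRM_PROC_PRINT, then either truncate to the requested size or flag *eof. A minimal sketch of that contract, with a hypothetical handler name and a bounded snprintf standing in for the DRM_PROC_PRINT macro:

static int example_info(char *buf, char **start, off_t offset,
                        int request, int *eof, void *data)
{
        int len = 0;

        if (offset > DRM_PROC_LIMIT) {          /* nothing past the cap */
                *eof = 1;
                return 0;
        }

        *start = &buf[offset];                  /* caller reads from here */
        *eof = 0;
        len += snprintf(&buf[len], PAGE_SIZE - len, "example: %d\n", 42);
        if (len > request + offset)             /* more left than asked for */
                return request;
        *eof = 1;                               /* everything was produced */
        return len - offset;
}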
diff --git a/linux-core/i915_gem_tiling.c b/linux-core/i915_gem_tiling.c
deleted file mode 100644
index a4ff736f..00000000
--- a/linux-core/i915_gem_tiling.c
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-/** @file i915_gem_tiling.c
- *
- * Support for managing tiling state of buffer objects.
- *
- * The idea behind tiling is to increase cache hit rates by rearranging
- * pixel data so that a group of pixel accesses lands in the same cacheline.
- * Performance improvements from doing this on the back/depth buffer are on
- * the order of 30%.
- *
- * Intel architectures make this somewhat more complicated, though, by
- * adjustments made to addressing of data when the memory is in interleaved
- * mode (matched pairs of DIMMS) to improve memory bandwidth.
- * For interleaved memory, the CPU sends every sequential 64 bytes
- * to an alternate memory channel so it can get the bandwidth from both.
- *
- * The GPU also rearranges its accesses for increased bandwidth to interleaved
- * memory, and it matches what the CPU does for non-tiled. However, when tiled
- * it does it a little differently, since one walks addresses not just in the
- * X direction but also Y. So, along with alternating channels when bit
- * 6 of the address flips, it also alternates when other bits flip -- Bits 9
- * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
- * are common to both the 915 and 965-class hardware.
- *
- * The CPU sometimes XORs in higher bits as well, to improve
- * bandwidth for the strided accesses we do so frequently in graphics. This
- * is called "Channel XOR Randomization" in the MCH documentation. The result
- * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
- * decode.
- *
- * All of this bit 6 XORing has an effect on our memory management,
- * as we need to make sure that the 3d driver can correctly address object
- * contents.
- *
- * If we don't have interleaved memory, all tiling is safe and no swizzling is
- * required.
- *
- * When bit 17 is XORed in, we simply refuse to tile at all. Bit
- * 17 is not just a page offset, so as we page an object out and back in,
- * individual pages in it will have different bit 17 addresses, resulting in
- * each 64 bytes being swapped with its neighbor!
- *
- * Otherwise, if interleaved, we have to tell the 3d driver what address
- * swizzling it needs to do, since it's writing with the CPU to the pages
- * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
- * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
- * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
- * to match what the GPU expects.
- */
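Concretely, the cumulative swizzle described above reduces to XORing one or more high address bits into bit 6 of a linear offset. A sketch of the common 9_10 case (the helper name is hypothetical, not part of this file):

static uint32_t swizzle_offset_bit6_9_10(uint32_t offset)
{
        /* XOR bits 9 and 10 of the linear offset into bit 6, matching
         * I915_BIT_6_SWIZZLE_9_10; the 9_11 and 9_10_11 modes just add
         * different terms to the same XOR.
         */
        uint32_t bit6 = ((offset >> 9) ^ (offset >> 10)) & 1;

        return offset ^ (bit6 << 6);
}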
-
-/**
- * Detects bit 6 swizzling of address lookup between IGD access and CPU
- * access through main memory.
- */
-void
-i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct pci_dev *bridge;
- uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- int mchbar_offset;
- char __iomem *mchbar;
- int ret;
-
- bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
- if (bridge == NULL) {
- DRM_ERROR("Couldn't get bridge device\n");
- return;
- }
-
- ret = pci_enable_device(bridge);
- if (ret != 0) {
- DRM_ERROR("pci_enable_device failed: %d\n", ret);
- return;
- }
-
- if (IS_I965G(dev))
- mchbar_offset = 0x48;
- else
- mchbar_offset = 0x44;
-
- /* Use resource 2 for our BAR that's stashed in a nonstandard location,
- * since the bridge would only ever use standard BARs 0-1 (though it
- * doesn't anyway)
- */
- ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
- if (ret != 0) {
- DRM_ERROR("pci_read_base failed: %d\n", ret);
- return;
- }
-
- mchbar = ioremap(pci_resource_start(bridge, 2),
- pci_resource_len(bridge, 2));
- if (mchbar == NULL) {
- DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
- return;
- }
-
- if (IS_I965G(dev) && !IS_I965GM(dev)) {
- uint32_t chdecmisc;
-
- /* On the 965, channel interleave appears to be determined by
- * the flex bit. If flex is set, then the ranks (sides of a
- * DIMM) of memory will be "stacked" (physical addresses walk
- * through one rank then move on to the next, flipping channels
- * or not depending on rank configuration). The GPU in this
- * case does exactly the same addressing as the CPU.
- *
- * Unlike the 945, channel randomization does not
- * appear to be available.
- *
- * XXX: While the G965 doesn't appear to do any interleaving
- * when the DIMMs are not exactly matched, the G4x chipsets
- * might do so for "L-shaped" configurations, and that will
- * need to be detected.
- *
- * L-shaped configuration:
- *
- * +-----+
- * | |
- * |DIMM2| <-- non-interleaved
- * +-----+
- * +-----+ +-----+
- * | | | |
- * |DIMM0| |DIMM1| <-- interleaved area
- * +-----+ +-----+
- */
- chdecmisc = readb(mchbar + CHDECMISC);
-
- if (chdecmisc == 0xff) {
- DRM_ERROR("Couldn't read from MCHBAR. "
- "Disabling tiling.\n");
- } else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- }
- } else if (IS_I9XX(dev)) {
- uint32_t dcc;
-
- /* On 915-945 and GM965, channel interleave by the CPU is
- * determined by DCC. The CPU will alternate based on bit 6
- * in interleaved mode, and the GPU will then also alternate
- * on bit 6, 9, and 10 for X, but the CPU may also optionally
- * alternate based on bit 17 (XOR not disabled and XOR
- * bit == 17).
- */
- dcc = readl(mchbar + DCC);
- switch (dcc & DCC_ADDRESSING_MODE_MASK) {
- case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- break;
- case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
- if (IS_I915G(dev) || IS_I915GM(dev) ||
- dcc & DCC_CHANNEL_XOR_DISABLE) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_I965GM(dev)) {
- /* GM965 only does bit 11-based channel
- * randomization
- */
- swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
- swizzle_y = I915_BIT_6_SWIZZLE_9_11;
- } else {
- /* Bit 17 or perhaps other swizzling */
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- }
- break;
- }
- if (dcc == 0xffffffff) {
- DRM_ERROR("Couldn't read from MCHBAR. "
- "Disabling tiling.\n");
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- }
- } else {
- /* As far as we know, the 865 doesn't have these bit 6
- * swizzling issues.
- */
- swizzle_x = I915_BIT_6_SWIZZLE_NONE;
- swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- }
-
- iounmap(mchbar);
-
- dev_priv->mm.bit_6_swizzle_x = swizzle_x;
- dev_priv->mm.bit_6_swizzle_y = swizzle_y;
-}
-
-/**
- * Sets the tiling mode of an object, returning the required swizzling of
- * bit 6 of addresses in the object.
- */
-int
-i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_set_tiling *args = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
- obj_priv = obj->driver_private;
-
- mutex_lock(&dev->struct_mutex);
-
- if (args->tiling_mode == I915_TILING_NONE) {
- args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- } else {
- if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
- else
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
- /* If we can't handle the swizzling, make it untiled. */
- if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
- args->tiling_mode = I915_TILING_NONE;
- args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- }
- }
- obj_priv->tiling_mode = args->tiling_mode;
-
- mutex_unlock(&dev->struct_mutex);
-
- drm_gem_object_unreference(obj);
-
- return 0;
-}
-
-/**
- * Returns the current tiling mode and required bit 6 swizzling for the object.
- */
-int
-i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_get_tiling *args = data;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
- obj_priv = obj->driver_private;
-
- mutex_lock(&dev->struct_mutex);
-
- args->tiling_mode = obj_priv->tiling_mode;
- switch (obj_priv->tiling_mode) {
- case I915_TILING_X:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
- break;
- case I915_TILING_Y:
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
- break;
- case I915_TILING_NONE:
- args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- break;
- default:
- DRM_ERROR("unknown tiling mode\n");
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- drm_gem_object_unreference(obj);
-
- return 0;
-}
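For context, a hypothetical userspace caller of this ioctl pair requests a tiling mode and then honors whatever the kernel hands back, including the silent fallback to linear on an unknown swizzle. The ioctl request name and helper below are assumptions for illustration, not code from this patch:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int request_x_tiling(int fd, uint32_t handle)
{
        struct drm_i915_gem_set_tiling set = {
                .handle = handle,               /* an existing GEM handle */
                .tiling_mode = I915_TILING_X,
        };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) != 0)
                return -1;                      /* ioctl itself failed */

        /* The kernel falls back to I915_TILING_NONE when the swizzle
         * mode is unknown, so a caller has to re-check the mode.
         */
        if (set.tiling_mode == I915_TILING_NONE)
                fprintf(stderr, "tiling refused: unknown swizzle\n");
        return 0;
}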
diff --git a/linux-core/i915_ioc32.c b/linux-core/i915_ioc32.c
deleted file mode 100644
index 0b8fff19..00000000
--- a/linux-core/i915_ioc32.c
+++ /dev/null
@@ -1,284 +0,0 @@
-/**
- * \file i915_ioc32.c
- *
- * 32-bit ioctl compatibility routines for the i915 DRM.
- *
- * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
- *
- * Copyright (C) Paul Mackerras 2005
- * Copyright (C) Alan Hourihane 2005
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-#include <linux/compat.h>
-
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-typedef struct _drm_i915_batchbuffer32 {
- int start; /* agp offset */
- int used; /* nr bytes in use */
- int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
- int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
- int num_cliprects; /* multipass with multiple cliprects? */
- u32 cliprects; /* pointer to userspace cliprects */
-} drm_i915_batchbuffer32_t;
-
-static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_batchbuffer32_t batchbuffer32;
- drm_i915_batchbuffer_t __user *batchbuffer;
-
- if (copy_from_user
- (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
- return -EFAULT;
-
- batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
- if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
- || __put_user(batchbuffer32.start, &batchbuffer->start)
- || __put_user(batchbuffer32.used, &batchbuffer->used)
- || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
- || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
- || __put_user(batchbuffer32.num_cliprects,
- &batchbuffer->num_cliprects)
- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
- &batchbuffer->cliprects))
- return -EFAULT;
-
- return drm_ioctl(file->f_dentry->d_inode, file,
- DRM_IOCTL_I915_BATCHBUFFER,
- (unsigned long) batchbuffer);
-}
-
-typedef struct _drm_i915_cmdbuffer32 {
- u32 buf; /* pointer to userspace command buffer */
- int sz; /* nr bytes in buf */
- int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
- int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
- int num_cliprects; /* multipass with multiple cliprects? */
- u32 cliprects; /* pointer to userspace cliprects */
-} drm_i915_cmdbuffer32_t;
-
-static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_cmdbuffer32_t cmdbuffer32;
- drm_i915_cmdbuffer_t __user *cmdbuffer;
-
- if (copy_from_user
- (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
- return -EFAULT;
-
- cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
- if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
- &cmdbuffer->buf)
- || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
- || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
- || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
- || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
- &cmdbuffer->cliprects))
- return -EFAULT;
-
- return drm_ioctl(file->f_dentry->d_inode, file,
- DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);
-}
-
-typedef struct drm_i915_irq_emit32 {
- u32 irq_seq;
-} drm_i915_irq_emit32_t;
-
-static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_irq_emit32_t req32;
- drm_i915_irq_emit_t __user *request;
-
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user((int __user *)(unsigned long)req32.irq_seq,
- &request->irq_seq))
- return -EFAULT;
-
- return drm_ioctl(file->f_dentry->d_inode, file,
- DRM_IOCTL_I915_IRQ_EMIT, (unsigned long) request);
-}
-
-typedef struct drm_i915_getparam32 {
- int param;
- u32 value;
-} drm_i915_getparam32_t;
-
-static int compat_i915_getparam(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_getparam32_t req32;
- drm_i915_getparam_t __user *request;
-
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
-
- return drm_ioctl(file->f_dentry->d_inode, file,
- DRM_IOCTL_I915_GETPARAM, (unsigned long) request);
-}
-
-typedef struct drm_i915_mem_alloc32 {
- int region;
- int alignment;
- int size;
- u32 region_offset; /* offset from start of fb or agp */
-} drm_i915_mem_alloc32_t;
-
-static int compat_i915_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_mem_alloc32_t req32;
- drm_i915_mem_alloc_t __user *request;
-
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.region, &request->region)
- || __put_user(req32.alignment, &request->alignment)
- || __put_user(req32.size, &request->size)
- || __put_user((void __user *)(unsigned long)req32.region_offset,
- &request->region_offset))
- return -EFAULT;
-
- return drm_ioctl(file->f_dentry->d_inode, file,
- DRM_IOCTL_I915_ALLOC, (unsigned long) request);
-}
-
-typedef struct drm_i915_execbuffer32 {
- uint64_t ops_list;
- uint32_t num_buffers;
- struct _drm_i915_batchbuffer32 batch;
- drm_context_t context;
- struct drm_fence_arg fence_arg;
-} drm_i915_execbuffer32_t;
-
-static int compat_i915_execbuffer(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_i915_execbuffer32_t req32;
- struct drm_i915_execbuffer __user *request;
- int err;
-
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
-
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.ops_list, &request->ops_list)
- || __put_user(req32.num_buffers, &request->num_buffers)
- || __put_user(req32.context, &request->context)
- || __copy_to_user(&request->fence_arg, &req32.fence_arg,
- sizeof(req32.fence_arg))
- || __put_user(req32.batch.start, &request->batch.start)
- || __put_user(req32.batch.used, &request->batch.used)
- || __put_user(req32.batch.DR1, &request->batch.DR1)
- || __put_user(req32.batch.DR4, &request->batch.DR4)
- || __put_user(req32.batch.num_cliprects,
- &request->batch.num_cliprects)
- || __put_user((int __user *)(unsigned long)req32.batch.cliprects,
- &request->batch.cliprects))
- return -EFAULT;
-
- err = drm_ioctl(file->f_dentry->d_inode, file,
- DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request);
-
- if (err)
- return err;
-
- if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle)
- || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class)
- || __get_user(req32.fence_arg.type, &request->fence_arg.type)
- || __get_user(req32.fence_arg.flags, &request->fence_arg.flags)
- || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled)
- || __get_user(req32.fence_arg.error, &request->fence_arg.error)
- || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence))
- return -EFAULT;
-
- if (copy_to_user((void __user *)arg, &req32, sizeof(req32)))
- return -EFAULT;
-
- return 0;
-}
-
-
-drm_ioctl_compat_t *i915_compat_ioctls[] = {
- [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
- [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
- [DRM_I915_GETPARAM] = compat_i915_getparam,
- [DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
- [DRM_I915_ALLOC] = compat_i915_alloc,
-#ifdef I915_HAVE_BUFFER
- [DRM_I915_EXECBUFFER] = compat_i915_execbuffer,
-#endif
-};
-
-/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
- *
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
- */
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- lock_kernel(); /* XXX for now */
- if (fn != NULL)
- ret = (*fn)(filp, cmd, arg);
- else
- ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
- unlock_kernel();
-
- return ret;
-}
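Every thunk in this file follows the same three-step shape; a generic sketch with placeholder struct, field, and ioctl names (purely illustrative):

static int compat_example(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        struct example32 req32;                 /* packed 32-bit layout */
        struct example __user *request;         /* native layout */

        /* 1. Copy the 32-bit argument block in from userspace. */
        if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
                return -EFAULT;

        /* 2. Rebuild the native struct on the compat user stack,
         * widening any u32-encoded pointers along the way.
         */
        request = compat_alloc_user_space(sizeof(*request));
        if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
            || __put_user((void __user *)(unsigned long)req32.ptr,
                          &request->ptr))
                return -EFAULT;

        /* 3. Forward to the regular 64-bit ioctl path. */
        return drm_ioctl(file->f_dentry->d_inode, file,
                         DRM_IOCTL_EXAMPLE, (unsigned long)request);
}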
diff --git a/linux-core/i915_irq.c b/linux-core/i915_irq.c
deleted file mode 120000
index 2058a2e4..00000000
--- a/linux-core/i915_irq.c
+++ /dev/null
@@ -1 +0,0 @@
-../shared-core/i915_irq.c \ No newline at end of file
diff --git a/linux-core/i915_mem.c b/linux-core/i915_mem.c
deleted file mode 120000
index e8e56553..00000000
--- a/linux-core/i915_mem.c
+++ /dev/null
@@ -1 +0,0 @@
-../shared-core/i915_mem.c \ No newline at end of file
diff --git a/linux-core/i915_opregion.c b/linux-core/i915_opregion.c
deleted file mode 100644
index 1fa599ea..00000000
--- a/linux-core/i915_opregion.c
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
- *
- * Copyright 2008 Intel Corporation <hong.liu@intel.com>
- * Copyright 2008 Red Hat <mjg@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/acpi.h>
-
-#include "drmP.h"
-#include "i915_drm.h"
-#include "i915_drv.h"
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
-#define PCI_ASLE 0xe4
-#define PCI_ASLS 0xfc
-
-#define OPREGION_SZ (8*1024)
-#define OPREGION_HEADER_OFFSET 0
-#define OPREGION_ACPI_OFFSET 0x100
-#define OPREGION_SWSCI_OFFSET 0x200
-#define OPREGION_ASLE_OFFSET 0x300
-#define OPREGION_VBT_OFFSET 0x1000
-
-#define OPREGION_SIGNATURE "IntelGraphicsMem"
-#define MBOX_ACPI (1<<0)
-#define MBOX_SWSCI (1<<1)
-#define MBOX_ASLE (1<<2)
-
-/* _DOD id definitions */
-#define OUTPUT_CONNECTOR_MSK 0xf000
-#define OUTPUT_CONNECTOR_OFFSET 12
-
-#define OUTPUT_PORT_MSK 0x00f0
-#define OUTPUT_PORT_OFFSET 4
- #define OUTPUT_PORT_ANALOG 0
- #define OUTPUT_PORT_LVDS 1
- #define OUTPUT_PORT_SDVOB 2
- #define OUTPUT_PORT_SDVOC 3
- #define OUTPUT_PORT_TV 4
-
-#define OUTPUT_DISPLAY_MSK 0x0f00
-#define OUTPUT_DISPLAY_OFFSET 8
- #define OUTPUT_DISPLAY_OTHER 0
- #define OUTPUT_DISPLAY_VGA 1
- #define OUTPUT_DISPLAY_TV 2
- #define OUTPUT_DISPLAY_DIGI 3
- #define OUTPUT_DISPLAY_FLAT_PANEL 4
-
-/* predefined id for integrated LVDS and VGA connector */
-#define OUTPUT_INT_LVDS 0x00000110
-#define OUTPUT_INT_VGA 0x80000100
-
-struct opregion_header {
- u8 signature[16];
- u32 size;
- u32 opregion_ver;
- u8 bios_ver[32];
- u8 vbios_ver[16];
- u8 driver_ver[16];
- u32 mboxes;
- u8 reserved[164];
-} __attribute__((packed));
-
-/* OpRegion mailbox #1: public ACPI methods */
-struct opregion_acpi {
- u32 drdy; /* driver readiness */
- u32 csts; /* notification status */
- u32 cevt; /* current event */
- u8 rsvd1[20];
- u32 didl[8]; /* supported display devices ID list */
- u32 cpdl[8]; /* currently presented display list */
- u32 cadl[8]; /* currently active display list */
- u32 nadl[8]; /* next active devices list */
- u32 aslp; /* ASL sleep time-out */
- u32 tidx; /* toggle table index */
- u32 chpd; /* current hotplug enable indicator */
- u32 clid; /* current lid state*/
- u32 cdck; /* current docking state */
- u32 sxsw; /* Sx state resume */
- u32 evts; /* ASL supported events */
- u32 cnot; /* current OS notification */
- u32 nrdy; /* driver status */
- u8 rsvd2[60];
-} __attribute__((packed));
-
-/* OpRegion mailbox #2: SWSCI */
-struct opregion_swsci {
- u32 scic; /* SWSCI command|status|data */
- u32 parm; /* command parameters */
- u32 dslp; /* driver sleep time-out */
- u8 rsvd[244];
-} __attribute__((packed));
-
-/* OpRegion mailbox #3: ASLE */
-struct opregion_asle {
- u32 ardy; /* driver readiness */
- u32 aslc; /* ASLE interrupt command */
- u32 tche; /* technology enabled indicator */
- u32 alsi; /* current ALS illuminance reading */
- u32 bclp; /* backlight brightness to set */
- u32 pfit; /* panel fitting state */
- u32 cblv; /* current brightness level */
- u16 bclm[20]; /* backlight level duty cycle mapping table */
- u32 cpfm; /* current panel fitting mode */
- u32 epfm; /* enabled panel fitting modes */
- u8 plut[74]; /* panel LUT and identifier */
- u32 pfmb; /* PWM freq and min brightness */
- u8 rsvd[102];
-} __attribute__((packed));
-
-/* ASLE irq request bits */
-#define ASLE_SET_ALS_ILLUM (1 << 0)
-#define ASLE_SET_BACKLIGHT (1 << 1)
-#define ASLE_SET_PFIT (1 << 2)
-#define ASLE_SET_PWM_FREQ (1 << 3)
-#define ASLE_REQ_MSK 0xf
-
-/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAIL (2<<10)
-#define ASLE_BACKLIGHT_FAIL (2<<12)
-#define ASLE_PFIT_FAIL (2<<14)
-#define ASLE_PWM_FREQ_FAIL (2<<16)
-
-/* ASLE backlight brightness to set */
-#define ASLE_BCLP_VALID (1<<31)
-#define ASLE_BCLP_MSK (~(1<<31))
-
-/* ASLE panel fitting request */
-#define ASLE_PFIT_VALID (1<<31)
-#define ASLE_PFIT_CENTER (1<<0)
-#define ASLE_PFIT_STRETCH_TEXT (1<<1)
-#define ASLE_PFIT_STRETCH_GFX (1<<2)
-
-/* PWM frequency and minimum brightness */
-#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
-#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
-#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
-#define ASLE_PFMB_PWM_VALID (1<<31)
-
-#define ASLE_CBLV_VALID (1<<31)
-
-static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 blc_pwm_ctl;
-
- if (!(bclp & ASLE_BCLP_VALID))
- return ASLE_BACKLIGHT_FAIL;
-
- bclp &= ASLE_BCLP_MSK;
- if (bclp > 255)
- return ASLE_BACKLIGHT_FAIL;
-
- blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) - 1));
- asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-
- return 0;
-}
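A quick worked check of the scaling above: bclp spans 0-255, so bclp * 0x101 stretches it across the 16-bit duty-cycle field (255 * 0x101 = 0xFFFF, less the 1 subtracted as written), while cblv reports bclp * 0x64 / 0xff, a 0-100 percentage, with ASLE_CBLV_VALID ORed in. Note that bclp == 0 makes (bclp * 0x101) - 1 wrap to all ones in unsigned arithmetic; firmware is presumably expected never to request a zero level through this path.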
-
-static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
-{
- return 0;
-}
-
-static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- if (pfmb & ASLE_PFMB_PWM_VALID) {
- u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
- u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
- blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
- pwm = pwm >> 9;
- /* FIXME - what do we do with the PWM? */
- }
- return 0;
-}
-
-static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
-{
- if (!(pfit & ASLE_PFIT_VALID))
- return ASLE_PFIT_FAIL;
- return 0;
-}
-
-void opregion_asle_intr(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 asle_stat = 0;
- u32 asle_req;
-
- if (!asle)
- return;
-
- asle_req = asle->aslc & ASLE_REQ_MSK;
-
- if (!asle_req) {
- DRM_DEBUG("non asle set request??\n");
- return;
- }
-
- if (asle_req & ASLE_SET_ALS_ILLUM)
- asle_stat |= asle_set_als_illum(dev, asle->alsi);
-
- if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev, asle->bclp);
-
- if (asle_req & ASLE_SET_PFIT)
- asle_stat |= asle_set_pfit(dev, asle->pfit);
-
- if (asle_req & ASLE_SET_PWM_FREQ)
- asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
-
- asle->aslc = asle_stat;
-}
-
-#define ASLE_ALS_EN (1<<0)
-#define ASLE_BLC_EN (1<<1)
-#define ASLE_PFIT_EN (1<<2)
-#define ASLE_PFMB_EN (1<<3)
-
-void opregion_enable_asle(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct opregion_asle *asle = dev_priv->opregion.asle;
-
- if (asle) {
- if (IS_MOBILE(dev)) {
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
- /* Some hardware uses the legacy backlight controller
- to signal interrupts, so we need to set up pipe B
- to generate an IRQ on writes */
- pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE;
- I915_WRITE(PIPEBSTAT, pipeb_stats);
-
- dev_priv->irq_mask_reg &=
- ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
- }
-
- dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT;
-
- asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
- ASLE_PFMB_EN;
- asle->ardy = 1;
- }
-}
-
-#define ACPI_EV_DISPLAY_SWITCH (1<<0)
-#define ACPI_EV_LID (1<<1)
-#define ACPI_EV_DOCK (1<<2)
-
-static struct intel_opregion *system_opregion;
-
-int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- /* The only video events relevant to opregion are 0x80. These indicate
- either a docking event, lid switch or display switch request. In
- Linux, these are handled by the dock, button and video drivers.
- We might want to fix the video driver to be opregion-aware in
- future, but right now we just indicate to the firmware that the
- request has been handled */
-
- struct opregion_acpi *acpi;
-
- if (!system_opregion)
- return NOTIFY_DONE;
-
- acpi = system_opregion->acpi;
- acpi->csts = 0;
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block intel_opregion_notifier = {
- .notifier_call = intel_opregion_video_event,
-};
-
-int intel_opregion_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_opregion *opregion = &dev_priv->opregion;
- void *base;
- u32 asls, mboxes;
- int err = 0;
-
- pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
- DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
- if (asls == 0) {
- DRM_DEBUG("ACPI OpRegion not supported!\n");
- return -ENOTSUPP;
- }
-
- base = ioremap(asls, OPREGION_SZ);
- if (!base)
- return -ENOMEM;
-
- opregion->header = base;
- if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
- DRM_DEBUG("opregion signature mismatch\n");
- err = -EINVAL;
- goto err_out;
- }
-
- mboxes = opregion->header->mboxes;
- if (mboxes & MBOX_ACPI) {
- DRM_DEBUG("Public ACPI methods supported\n");
- opregion->acpi = base + OPREGION_ACPI_OFFSET;
- } else {
- DRM_DEBUG("Public ACPI methods not supported\n");
- err = -ENOTSUPP;
- goto err_out;
- }
- opregion->enabled = 1;
-
- if (mboxes & MBOX_SWSCI) {
- DRM_DEBUG("SWSCI supported\n");
- opregion->swsci = base + OPREGION_SWSCI_OFFSET;
- }
- if (mboxes & MBOX_ASLE) {
- DRM_DEBUG("ASLE supported\n");
- opregion->asle = base + OPREGION_ASLE_OFFSET;
- }
-
- /* Notify BIOS we are ready to handle ACPI video ext notifs.
- * Right now, all the events are handled by the ACPI video module.
- * We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
-
- system_opregion = opregion;
- register_acpi_notifier(&intel_opregion_notifier);
-
- return 0;
-
-err_out:
- iounmap(opregion->header);
- opregion->header = NULL;
- return err;
-}
-
-void intel_opregion_free(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->enabled)
- return;
-
- opregion->acpi->drdy = 0;
-
- system_opregion = NULL;
- unregister_acpi_notifier(&intel_opregion_notifier);
-
- /* just clear all opregion memory pointers now */
- iounmap(opregion->header);
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
-
- opregion->enabled = 0;
-}
-#endif
diff --git a/linux-core/i915_suspend.c b/linux-core/i915_suspend.c
deleted file mode 120000
index b55754c5..00000000
--- a/linux-core/i915_suspend.c
+++ /dev/null
@@ -1 +0,0 @@
-../shared-core/i915_suspend.c \ No newline at end of file