| author | Jerome Glisse <glisse@freedesktop.org> | 2008-03-10 23:36:27 +0100 |
| committer | John Doe <glisse@freedesktop.org> | 2008-03-10 23:36:27 +0100 |
| commit | a7e6ca62ad0d9c3c45fd9e1d81b59c2db2d714cf | |
| tree | 8f757eeaa2c234af14ca1df515de7810e770ed91 /shared-core | |
| parent | a7dc4d08b9b4f8fe6fcaa4c778f6dd3718d1e36a | |
| parent | 9f19e79f955281b9de393219e4ad9835ffe29c49 | |
Merge branch 'modesetting-101' of ssh://git.freedesktop.org/git/mesa/drm into modesetting-101
Diffstat (limited to 'shared-core')
| -rw-r--r-- | shared-core/drm.h | 21 |
| -rw-r--r-- | shared-core/i915_dma.c | 79 |
| -rw-r--r-- | shared-core/i915_drm.h | 2 |
| -rw-r--r-- | shared-core/i915_init.c | 236 |
| -rw-r--r-- | shared-core/i915_irq.c | 7 |
5 files changed, 234 insertions, 111 deletions
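
The drm.h hunk below rewrites the WAIT_LAZY documentation: on hardware with no completion interrupt, the flag asks the kernel to sleep between polls of a blocking operation instead of busy-waiting, and it is a no-op elsewhere. A minimal sketch of what that distinction means for a caller; wait_for_completion_hint() and its arguments are illustrative stand-ins, not part of libdrm or the kernel driver.

```c
/*
 * Sketch of the WAIT_LAZY semantics described in the drm.h hunk below.
 * wait_for_completion_hint() is a hypothetical helper, not a real DRM
 * entry point.
 */
#include <stdbool.h>
#include <unistd.h>

#define DRM_BO_HINT_WAIT_LAZY 0x00000008

static int wait_for_completion_hint(bool (*done)(void *), void *arg,
				    unsigned int hint)
{
	while (!done(arg)) {
		if (hint & DRM_BO_HINT_WAIT_LAZY)
			usleep(1000);	/* sleep, then poll again */
		/* otherwise spin: re-check immediately */
	}
	return 0;
}
```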
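Several i915 hunks below gate the legacy DMA paths behind drm_core_check_feature(dev, DRIVER_MODESET), since with kernel modesetting the ring buffer is owned by the kernel and never exposed to userspace. A simplified sketch of that gating pattern, assuming stand-in types and an illustrative feature bit rather than the DRM core definitions.

```c
/*
 * Sketch of the DRIVER_MODESET gating pattern used across the i915 hunks
 * below; the struct, the feature bit value, and legacy_dma_init() are
 * simplified stand-ins for the DRM core types, not its real API.
 */
struct fake_drm_device {
	unsigned int driver_features;
};

#define FAKE_DRIVER_MODESET 0x2000	/* illustrative bit value */

static int fake_check_feature(struct fake_drm_device *dev, unsigned int f)
{
	return (dev->driver_features & f) != 0;
}

static int legacy_dma_init(struct fake_drm_device *dev)
{
	/* With kernel modesetting the ring belongs to the kernel, so the
	 * legacy, user-triggered DMA setup paths simply bail out early. */
	if (fake_check_feature(dev, FAKE_DRIVER_MODESET))
		return 0;

	/* ... legacy initialization would run here ... */
	return 0;
}
```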
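The i915_irq.c hunk below adds a guess for a hardware vblank counter that another client (e.g. a 2D driver) may have reset: a zero reading is treated as a reset rather than a wrap whenever the previous software value was still below half the counter range. A standalone rendering of that heuristic with illustrative names, not the driver's internals.

```c
#include <stdint.h>

/* Illustrative reading of the reset-vs-wrap guess added in i915_irq.c. */
static uint32_t vblank_counter_guess(uint32_t hw_count, uint32_t *last_sw,
				     uint32_t max_count)
{
	/*
	 * A zero reading can mean the hardware counter wrapped, or that
	 * another client reset it.  If the previous software value was
	 * still well below the wrap point, a wrap is unlikely, so restart
	 * the software copy from zero rather than accounting a full
	 * counter period.
	 */
	if (hw_count == 0 && *last_sw < max_count / 2)
		*last_sw = 0;

	return hw_count;
}
```
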
diff --git a/shared-core/drm.h b/shared-core/drm.h
index cbe83fd1..303a84b6 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -645,8 +645,14 @@ struct drm_set_version {
 #define DRM_FENCE_FLAG_EMIT 0x00000001
 #define DRM_FENCE_FLAG_SHAREABLE 0x00000002
+/**
+ * On hardware with no interrupt events for operation completion,
+ * indicates that the kernel should sleep while waiting for any blocking
+ * operation to complete rather than spinning.
+ *
+ * Has no effect otherwise.
+ */
 #define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
-#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008
 #define DRM_FENCE_FLAG_NO_USER 0x00000010
 
 /* Reserved for driver use */
@@ -795,13 +801,12 @@ struct drm_fence_arg {
  * with it as a result of this operation
  */
 #define DRM_BO_HINT_DONT_FENCE 0x00000004
-/*
- * Sleep while waiting for the operation to complete.
- * Without this flag, the kernel will, instead, spin
- * until this operation has completed. I'm not sure
- * why you would ever want this, so please always
- * provide DRM_BO_HINT_WAIT_LAZY to any operation
- * which may block
+/**
+ * On hardware with no interrupt events for operation completion,
+ * indicates that the kernel should sleep while waiting for any blocking
+ * operation to complete rather than spinning.
+ *
+ * Has no effect otherwise.
  */
 #define DRM_BO_HINT_WAIT_LAZY 0x00000008
 /*
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index eee24dd1..8237e145 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -66,6 +66,11 @@ void i915_kernel_lost_context(struct drm_device * dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
 
+	/* we should never lose context on the ring with modesetting
+	 * as we don't expose it to userspace */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
 	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
@@ -75,6 +80,11 @@ void i915_kernel_lost_context(struct drm_device * dev)
 
 int i915_dma_cleanup(struct drm_device * dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
@@ -82,6 +92,28 @@ int i915_dma_cleanup(struct drm_device * dev)
 	if (dev->irq)
 		drm_irq_uninstall(dev);
 
+	if (dev_priv->ring.virtual_start) {
+		drm_core_ioremapfree(&dev_priv->ring.map, dev);
+		dev_priv->ring.virtual_start = 0;
+		dev_priv->ring.map.handle = 0;
+		dev_priv->ring.map.size = 0;
+		dev_priv->ring.Size = 0;
+	}
+
+	if (dev_priv->status_page_dmah) {
+		drm_pci_free(dev, dev_priv->status_page_dmah);
+		dev_priv->status_page_dmah = NULL;
+		/* Need to rewrite hardware status page */
+		I915_WRITE(0x02080, 0x1ffff000);
+	}
+
+	if (dev_priv->status_gfx_addr) {
+		dev_priv->status_gfx_addr = 0;
+		drm_core_ioremapfree(&dev_priv->hws_map, dev);
+		I915_WRITE(0x02080, 0x1ffff000);
+	}
+
 	return 0;
 }
@@ -153,13 +185,17 @@ static int i915_initialize(struct drm_device * dev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
-	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
-	if (!dev_priv->mmio_map) {
-		i915_dma_cleanup(dev);
-		DRM_ERROR("can not find mmio map!\n");
-		return -EINVAL;
+	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+		if (init->mmio_offset != 0)
+			dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+		if (!dev_priv->mmio_map) {
+			i915_dma_cleanup(dev);
+			DRM_ERROR("can not find mmio map!\n");
+			return -EINVAL;
+		}
 	}
 
+
 #ifdef I915_HAVE_BUFFER
 	dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
 #endif
@@ -246,6 +282,9 @@ static int i915_dma_resume(struct drm_device * dev)
 
 	DRM_DEBUG("\n");
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
 	if (!dev_priv->mmio_map) {
 		DRM_ERROR("can not find mmio map!\n");
 		return -EINVAL;
@@ -765,6 +804,8 @@ struct i915_relocatee_info {
 	unsigned page_offset;
 	struct drm_bo_kmap_obj kmap;
 	int is_iomem;
+	int idle;
+	int evicted;
 };
 
 struct drm_i915_validate_buffer {
@@ -820,6 +861,14 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
 		drm_bo_kunmap(&relocatee->kmap);
 		relocatee->data_page = NULL;
 		relocatee->offset = new_cmd_offset;
+
+		if (unlikely(!relocatee->idle)) {
+			ret = drm_bo_wait(relocatee->buf, 0, 0, 0);
+			if (ret)
+				return ret;
+			relocatee->idle = 1;
+		}
+
 		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
 				  1, &relocatee->kmap);
 		if (ret) {
@@ -829,6 +878,12 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
 		relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
 						       &relocatee->is_iomem);
 		relocatee->page_offset = (relocatee->offset & PAGE_MASK);
+
+		if (!relocatee->evicted &&
+		    relocatee->buf->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
+			drm_bo_evict_cached(relocatee->buf);
+			relocatee->evicted = 1;
+		}
 	}
 
 	val = buffers[buf_index].buffer->offset;
@@ -963,10 +1018,6 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
 	}
 
 	mutex_lock (&relocatee.buf->mutex);
-	ret = drm_bo_wait (relocatee.buf, 0, 0, FALSE);
-	if (ret)
-		goto out_err1;
-
 	while (reloc_user_ptr) {
 		ret = i915_process_relocs(file_priv, buf_handle, &reloc_user_ptr, &relocatee, buffers, buf_count);
 		if (ret) {
@@ -1430,6 +1481,16 @@ drm_i915_mmio_entry_t mmio_table[] = {
 		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
 		0x30010,
 		6
+	},
+	[MMIO_REGS_FENCE] = {
+		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
+		0x2000,
+		8
+	},
+	[MMIO_REGS_FENCE_NEW] = {
+		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
+		0x3000,
+		16
 	}
 };
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index a40cabdc..824aee27 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -328,6 +328,8 @@ typedef struct drm_i915_vblank_swap {
 #define MMIO_REGS_PS_DEPTH_COUNT 8
 #define MMIO_REGS_DOVSTA 9
 #define MMIO_REGS_GAMMA 10
+#define MMIO_REGS_FENCE 11
+#define MMIO_REGS_FENCE_NEW 12
 
 typedef struct drm_i915_mmio_entry {
 	unsigned int flag;
diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c
index b2d3f0d0..3d8a1dca 100644
--- a/shared-core/i915_init.c
+++ b/shared-core/i915_init.c
@@ -153,106 +153,113 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	DRM_DEBUG("fb_base: 0x%08lx\n", dev->mode_config.fb_base);
 
 	ret = drm_addmap(dev, dev_priv->mmiobase, dev_priv->mmiolen,
-			 _DRM_REGISTERS, _DRM_READ_ONLY|_DRM_DRIVER, &dev_priv->mmio_map);
+			 _DRM_REGISTERS, _DRM_KERNEL|_DRM_READ_ONLY|_DRM_DRIVER, &dev_priv->mmio_map);
 	if (ret != 0) {
 		DRM_ERROR("Cannot add mapping for MMIO registers\n");
 		return ret;
 	}
 
+#ifdef __linux__
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+	intel_init_chipset_flush_compat(dev);
+#endif
+#endif
+
 	/*
 	 * Initialize the memory manager for local and AGP space
 	 */
 	drm_bo_driver_init(dev);
 
-	i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
-	printk("setting up %ld bytes of VRAM space\n", prealloc_size);
-	printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
-	drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
-	drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT, (agp_size - prealloc_size) >> PAGE_SHIFT, 1);
-
-	I915_WRITE(LP_RING + RING_LEN, 0);
-	I915_WRITE(LP_RING + RING_HEAD, 0);
-	I915_WRITE(LP_RING + RING_TAIL, 0);
-
-	size = PRIMARY_RINGBUFFER_SIZE;
-	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
-				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
-				       DRM_BO_FLAG_MEM_VRAM |
-				       DRM_BO_FLAG_NO_EVICT,
-				       DRM_BO_HINT_DONT_FENCE, 0x1, 0,
-				       &dev_priv->ring_buffer);
-	if (ret < 0) {
-		DRM_ERROR("Unable to allocate or pin ring buffer\n");
-		return -EINVAL;
-	}
-
-	/* remap the buffer object properly */
-	dev_priv->ring.Start = dev_priv->ring_buffer->offset;
-	dev_priv->ring.End = dev_priv->ring.Start + size;
-	dev_priv->ring.Size = size;
-	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
-	/* FIXME: need wrapper with PCI mem checks */
-	ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
-				  (void **) &dev_priv->ring.virtual_start);
-	if (ret)
-		DRM_ERROR("error mapping ring buffer: %d\n", ret);
-
-	DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
-		  dev_priv->ring.virtual_start, dev_priv->ring.Size);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
+		printk("setting up %ld bytes of VRAM space\n", prealloc_size);
+		printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
+		drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
+		drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT, (agp_size - prealloc_size) >> PAGE_SHIFT, 1);
+
+		I915_WRITE(LP_RING + RING_LEN, 0);
+		I915_WRITE(LP_RING + RING_HEAD, 0);
+		I915_WRITE(LP_RING + RING_TAIL, 0);
+
+		size = PRIMARY_RINGBUFFER_SIZE;
+		ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
+					       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+					       DRM_BO_FLAG_MEM_VRAM |
+					       DRM_BO_FLAG_NO_EVICT,
+					       DRM_BO_HINT_DONT_FENCE, 0x1, 0,
+					       &dev_priv->ring_buffer);
+		if (ret < 0) {
+			DRM_ERROR("Unable to allocate or pin ring buffer\n");
+			return -EINVAL;
+		}
+
+		/* remap the buffer object properly */
+		dev_priv->ring.Start = dev_priv->ring_buffer->offset;
+		dev_priv->ring.End = dev_priv->ring.Start + size;
+		dev_priv->ring.Size = size;
+		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+		/* FIXME: need wrapper with PCI mem checks */
+		ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
+					  (void **) &dev_priv->ring.virtual_start);
+		if (ret)
+			DRM_ERROR("error mapping ring buffer: %d\n", ret);
+
+		DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
+			  dev_priv->ring.virtual_start, dev_priv->ring.Size);
 
 //
-	memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
-
-	I915_WRITE(LP_RING + RING_START, dev_priv->ring.Start);
-	I915_WRITE(LP_RING + RING_LEN,
-		   ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
-		   (RING_NO_REPORT | RING_VALID));
-
-	/* We are using separate values as placeholders for mechanisms for
-	 * private backbuffer/depthbuffer usage.
-	 */
-	dev_priv->use_mi_batchbuffer_start = 0;
+		memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
+
+		I915_WRITE(LP_RING + RING_START, dev_priv->ring.Start);
+		I915_WRITE(LP_RING + RING_LEN,
+			   ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
+			   (RING_NO_REPORT | RING_VALID));
+
+		/* We are using separate values as placeholders for mechanisms for
+		 * private backbuffer/depthbuffer usage.
+		 */
+		dev_priv->use_mi_batchbuffer_start = 0;
+
+		/* Allow hardware batchbuffers unless told otherwise.
+		 */
+		dev_priv->allow_batchbuffer = 1;
+
+		/* Program Hardware Status Page */
+		if (!IS_G33(dev)) {
+			dev_priv->status_page_dmah =
+				drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+			if (!dev_priv->status_page_dmah) {
+				dev->dev_private = (void *)dev_priv;
+				i915_dma_cleanup(dev);
+				DRM_ERROR("Can not allocate hardware status page\n");
+				return -ENOMEM;
+			}
+			dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+			dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+			memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+			I915_WRITE(I915REG_HWS_PGA, dev_priv->dma_status_page);
+		}
+		DRM_DEBUG("Enabled hardware status page\n");
 
-	/* Allow hardware batchbuffers unless told otherwise.
-	 */
-	dev_priv->allow_batchbuffer = 1;
-
-	/* Program Hardware Status Page */
-	if (!IS_G33(dev)) {
-		dev_priv->status_page_dmah =
-			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
-		if (!dev_priv->status_page_dmah) {
-			dev->dev_private = (void *)dev_priv;
-			i915_dma_cleanup(dev);
-			DRM_ERROR("Can not allocate hardware status page\n");
-			return -ENOMEM;
+		dev_priv->wq = create_singlethread_workqueue("i915");
+		if (dev_priv == 0) {
+			DRM_DEBUG("Error\n");
 		}
-		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
-		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+		intel_modeset_init(dev);
+		drm_initial_config(dev, false);
 
-		I915_WRITE(I915REG_HWS_PGA, dev_priv->dma_status_page);
-	}
-	DRM_DEBUG("Enabled hardware status page\n");
+		drm_mm_print(&dev->bm.man[DRM_BO_MEM_VRAM].manager, "VRAM");
+		drm_mm_print(&dev->bm.man[DRM_BO_MEM_TT].manager, "TT");
 
-	dev_priv->wq = create_singlethread_workqueue("i915");
-	if (dev_priv == 0) {
-		DRM_DEBUG("Error\n");
+		drm_irq_install(dev);
 	}
-
-	intel_modeset_init(dev);
-	drm_initial_config(dev, false);
-
-	drm_mm_print(&dev->bm.man[DRM_BO_MEM_VRAM].manager, "VRAM");
-	drm_mm_print(&dev->bm.man[DRM_BO_MEM_TT].manager, "TT");
-
-	drm_irq_install(dev);
-
 	return 0;
 }
@@ -262,7 +269,11 @@ int i915_driver_unload(struct drm_device *dev)
 
 	I915_WRITE(LP_RING + RING_LEN, 0);
 
-	intel_modeset_cleanup(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		drm_irq_uninstall(dev);
+		intel_modeset_cleanup(dev);
+	}
 
 #if 0
 	if (dev_priv->ring.virtual_start) {
@@ -298,25 +309,33 @@ int i915_driver_unload(struct drm_device *dev)
 		I915_WRITE(I915REG_HWS_PGA, 0x1ffff000);
 	}
 
-	drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
-			    dev_priv->ring.virtual_start);
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
+				    dev_priv->ring.virtual_start);
 
-	DRM_DEBUG("usage is %d\n", atomic_read(&dev_priv->ring_buffer->usage));
-	mutex_lock(&dev->struct_mutex);
-	drm_bo_usage_deref_locked(&dev_priv->ring_buffer);
+		DRM_DEBUG("usage is %d\n", atomic_read(&dev_priv->ring_buffer->usage));
+		mutex_lock(&dev->struct_mutex);
+		drm_bo_usage_deref_locked(&dev_priv->ring_buffer);
 
-	if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
-		DRM_ERROR("Memory manager type 3 not clean. "
-			  "Delaying takedown\n");
-	}
-	if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
-		DRM_ERROR("Memory manager type 3 not clean. "
-			  "Delaying takedown\n");
+		if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
+			DRM_ERROR("Memory manager type 3 not clean. "
+				  "Delaying takedown\n");
+		}
+		if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
+			DRM_ERROR("Memory manager type 3 not clean. "
+				  "Delaying takedown\n");
+		}
+		mutex_unlock(&dev->struct_mutex);
 	}
-	mutex_unlock(&dev->struct_mutex);
 
 	drm_bo_driver_finish(dev);
 
+#ifdef __linux__
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+	intel_init_chipset_flush_compat(dev);
+#endif
+#endif
+
 	DRM_DEBUG("%p\n", dev_priv->mmio_map);
 	drm_rmmap(dev, dev_priv->mmio_map);
@@ -363,3 +382,32 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 
 	master->driver_priv = NULL;
 }
+
+void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+}
+
+void i915_driver_lastclose(struct drm_device * dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
+	if (dev_priv->agp_heap)
+		i915_mem_takedown(&(dev_priv->agp_heap));
+
+	i915_dma_cleanup(dev);
+}
+
+int i915_driver_firstopen(struct drm_device *dev)
+{
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return 0;
+
+	drm_bo_driver_init(dev);
+	return 0;
+}
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index dad6ef86..b9d137f4 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -415,6 +415,13 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
 	if (i915_in_vblank(dev, pipe))
 		count++;
 #endif
+	/* count may be reset by other driver(e.g. 2D driver),
+	   we have no way to know if it is wrapped or resetted
+	   when count is zero. do a rough guess.
+	 */
+	if (count == 0 && dev->last_vblank[pipe] < dev->max_vblank_count/2)
+		dev->last_vblank[pipe] = 0;
+
 	return count;
 }