From c2f80ecf4be09b5b9866d12e3b25cdcf7996b1f4 Mon Sep 17 00:00:00 2001 From: Patrice Mandin Date: Fri, 9 Nov 2007 18:08:08 +0100 Subject: suspend() and resume() need kernel 2.6.22 or later --- linux-core/drm_sysfs.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'linux-core') diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index 6f8623ce..caec120a 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -89,8 +89,10 @@ struct class *drm_sysfs_create(struct module *owner, char *name) goto err_out; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) class->suspend = drm_sysfs_suspend; class->resume = drm_sysfs_resume; +#endif err = class_create_file(class, &class_attr_version); if (err) -- cgit v1.2.3 From 793cd1dad5e248509a1b09dce7126f236efadb3e Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Sat, 10 Nov 2007 14:39:36 -0800 Subject: Make sure PLLs are enabled before writing pipe configuration regs Fix from the X driver. Make sure the PLLs are enabled and not in VGA mode before writing PIPE(A|B)CONF regs to avoid hangs or crashes. --- linux-core/i915_drv.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 798491ae..d2bcf4b5 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -431,7 +431,11 @@ static int i915_resume(struct drm_device *dev) I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); } - I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); + + if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) && + (dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS)) + I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); + i915_restore_palette(dev, PIPE_A); /* Enable the plane */ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); @@ -471,7 +475,10 @@ static int i915_resume(struct drm_device *dev) I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); } - I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); + + if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) && + (dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS)) + I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); i915_restore_palette(dev, PIPE_A); /* Enable the plane */ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); -- cgit v1.2.3 From 2370ded79b4176d76cda1ec5f495fd33c2d566ed Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 9 Nov 2007 04:27:23 +1100 Subject: nouveau: stub superioctl --- linux-core/Makefile.kernel | 2 +- linux-core/nouveau_bo.c | 265 ++++++++++++++++++++++++++++++++++++++++++++ linux-core/nouveau_buffer.c | 256 ------------------------------------------ 3 files changed, 266 insertions(+), 257 deletions(-) create mode 100644 linux-core/nouveau_bo.c delete mode 100644 linux-core/nouveau_buffer.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index e7c280d0..92e1eb7e 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -23,7 +23,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o i915_compat.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ - nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \ + nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/nouveau_bo.c b/linux-core/nouveau_bo.c new file mode 100644 index 
00000000..f0b0576c --- /dev/null +++ b/linux-core/nouveau_bo.c @@ -0,0 +1,265 @@ +/* + * Copyright 2007 Dave Airlied + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +/* + * Authors: Dave Airlied + * Ben Skeggs + * Jeremy Kolb + */ + +#include "drmP.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" + +static struct drm_ttm_backend * +nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + switch (dev_priv->gart_info.type) { + case NOUVEAU_GART_AGP: + return drm_agp_init_ttm(dev); + case NOUVEAU_GART_SGDMA: + return nouveau_sgdma_init_ttm(dev); + default: + DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); + break; + } + + return NULL; +} + +static int +nouveau_bo_fence_type(struct drm_buffer_object *bo, + uint32_t *fclass, uint32_t *type) +{ + /* When we get called, *fclass is set to the requested fence class */ + + if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) + *type = 3; + else + *type = 1; + return 0; + +} + +static int +nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) +{ + /* We'll do this from user space. 
*/ + return 0; +} + +static int +nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + switch (type) { + case DRM_BO_MEM_LOCAL: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED; + man->drm_bus_maptype = 0; + break; + case DRM_BO_MEM_VRAM: + man->flags = _DRM_FLAG_MEMTYPE_FIXED | + _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_NEEDS_IOREMAP; + man->io_addr = NULL; + man->drm_bus_maptype = _DRM_FRAME_BUFFER; + man->io_offset = drm_get_resource_start(dev, 1); + man->io_size = drm_get_resource_len(dev, 1); + if (man->io_size > nouveau_mem_fb_amount(dev)) + man->io_size = nouveau_mem_fb_amount(dev); + break; + case DRM_BO_MEM_PRIV0: + /* Unmappable VRAM */ + man->flags = _DRM_FLAG_MEMTYPE_CMA; + man->drm_bus_maptype = 0; + break; + case DRM_BO_MEM_TT: + switch (dev_priv->gart_info.type) { + case NOUVEAU_GART_AGP: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CSELECT | + _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; + break; + case NOUVEAU_GART_SGDMA: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CSELECT | + _DRM_FLAG_MEMTYPE_CMA; + man->drm_bus_maptype = _DRM_SCATTER_GATHER; + break; + default: + DRM_ERROR("Unknown GART type: %d\n", + dev_priv->gart_info.type); + return -EINVAL; + } + + man->io_offset = dev_priv->gart_info.aper_base; + man->io_size = dev_priv->gart_info.aper_size; + man->io_addr = NULL; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +static uint32_t +nouveau_bo_evict_mask(struct drm_buffer_object *bo) +{ + switch (bo->mem.mem_type) { + case DRM_BO_MEM_LOCAL: + case DRM_BO_MEM_TT: + return DRM_BO_FLAG_MEM_LOCAL; + default: + return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; + } + return 0; +} + +/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access + * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. + */ +static int +nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, + struct drm_bo_mem_reg *new_mem) +{ + struct drm_device *dev = bo->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_drm_channel *dchan = &dev_priv->channel; + struct drm_bo_mem_reg *old_mem = &bo->mem; + uint32_t srch, dsth, page_count; + + /* Can happen during init/takedown */ + if (!dchan->chan) + return -EINVAL; + + srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; + dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; + if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { + dchan->m2mf_dma_source = srch; + dchan->m2mf_dma_destin = dsth; + + BEGIN_RING(NvSubM2MF, + NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); + OUT_RING (dchan->m2mf_dma_source); + OUT_RING (dchan->m2mf_dma_destin); + } + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 2047) ? 
2047 : page_count; + + BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); + OUT_RING (old_mem->mm_node->start << PAGE_SHIFT); + OUT_RING (new_mem->mm_node->start << PAGE_SHIFT); + OUT_RING (PAGE_SIZE); /* src_pitch */ + OUT_RING (PAGE_SIZE); /* dst_pitch */ + OUT_RING (PAGE_SIZE); /* line_length */ + OUT_RING (line_count); + OUT_RING ((1<<8)|(1<<0)); + OUT_RING (0); + BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + OUT_RING (0); + + page_count -= line_count; + } + + return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, + DRM_FENCE_TYPE_EXE, 0, new_mem); +} + +static int +nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, + struct drm_bo_mem_reg *new_mem) +{ + struct drm_bo_mem_reg *old_mem = &bo->mem; + + if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); +#if 0 + if (!nouveau_bo_move_flipd(bo, evict, no_wait, new_mem)) +#endif + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + else + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { +#if 0 + if (nouveau_bo_move_flips(bo, evict, no_wait, new_mem)) +#endif + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + else { +// if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + return 0; +} + +static void +nouveau_bo_flush_ttm(struct drm_ttm *ttm) +{ +} + +static uint32_t nouveau_mem_prios[] = { + DRM_BO_MEM_PRIV0, + DRM_BO_MEM_VRAM, + DRM_BO_MEM_TT, + DRM_BO_MEM_LOCAL +}; +static uint32_t nouveau_busy_prios[] = { + DRM_BO_MEM_TT, + DRM_BO_MEM_PRIV0, + DRM_BO_MEM_VRAM, + DRM_BO_MEM_LOCAL +}; + +struct drm_bo_driver nouveau_bo_driver = { + .mem_type_prio = nouveau_mem_prios, + .mem_busy_prio = nouveau_busy_prios, + .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), + .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), + .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, + .fence_type = nouveau_bo_fence_type, + .invalidate_caches = nouveau_bo_invalidate_caches, + .init_mem_type = nouveau_bo_init_mem_type, + .evict_mask = nouveau_bo_evict_mask, + .move = nouveau_bo_move, + .ttm_cache_flush= nouveau_bo_flush_ttm +}; + +int +nouveau_bo_validate(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + DRM_ERROR("unimplemented\n"); + return -EINVAL; +} + diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c deleted file mode 100644 index c40dff6b..00000000 --- a/linux-core/nouveau_buffer.c +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright 2007 Dave Airlied - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -/* - * Authors: Dave Airlied - * Ben Skeggs - * Jeremy Kolb - */ - -#include "drmP.h" -#include "nouveau_drm.h" -#include "nouveau_drv.h" -#include "nouveau_dma.h" - -static struct drm_ttm_backend * -nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - switch (dev_priv->gart_info.type) { - case NOUVEAU_GART_AGP: - return drm_agp_init_ttm(dev); - case NOUVEAU_GART_SGDMA: - return nouveau_sgdma_init_ttm(dev); - default: - DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); - break; - } - - return NULL; -} - -static int -nouveau_bo_fence_type(struct drm_buffer_object *bo, - uint32_t *fclass, uint32_t *type) -{ - /* When we get called, *fclass is set to the requested fence class */ - - if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) - *type = 3; - else - *type = 1; - return 0; - -} - -static int -nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) -{ - /* We'll do this from user space. */ - return 0; -} - -static int -nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, - struct drm_mem_type_manager *man) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - switch (type) { - case DRM_BO_MEM_LOCAL: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CACHED; - man->drm_bus_maptype = 0; - break; - case DRM_BO_MEM_VRAM: - man->flags = _DRM_FLAG_MEMTYPE_FIXED | - _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_NEEDS_IOREMAP; - man->io_addr = NULL; - man->drm_bus_maptype = _DRM_FRAME_BUFFER; - man->io_offset = drm_get_resource_start(dev, 1); - man->io_size = drm_get_resource_len(dev, 1); - if (man->io_size > nouveau_mem_fb_amount(dev)) - man->io_size = nouveau_mem_fb_amount(dev); - break; - case DRM_BO_MEM_PRIV0: - /* Unmappable VRAM */ - man->flags = _DRM_FLAG_MEMTYPE_CMA; - man->drm_bus_maptype = 0; - break; - case DRM_BO_MEM_TT: - switch (dev_priv->gart_info.type) { - case NOUVEAU_GART_AGP: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CSELECT | - _DRM_FLAG_NEEDS_IOREMAP; - man->drm_bus_maptype = _DRM_AGP; - break; - case NOUVEAU_GART_SGDMA: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CSELECT | - _DRM_FLAG_MEMTYPE_CMA; - man->drm_bus_maptype = _DRM_SCATTER_GATHER; - break; - default: - DRM_ERROR("Unknown GART type: %d\n", - dev_priv->gart_info.type); - return -EINVAL; - } - - man->io_offset = dev_priv->gart_info.aper_base; - man->io_size = dev_priv->gart_info.aper_size; - man->io_addr = NULL; - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); - return -EINVAL; - } - return 0; -} - -static uint32_t -nouveau_bo_evict_mask(struct drm_buffer_object *bo) -{ - switch (bo->mem.mem_type) { - case DRM_BO_MEM_LOCAL: - case DRM_BO_MEM_TT: - return DRM_BO_FLAG_MEM_LOCAL; - default: - return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; - } - return 0; -} - -/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access - * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. 
- */ -static int -nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, - struct drm_bo_mem_reg *new_mem) -{ - struct drm_device *dev = bo->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_drm_channel *dchan = &dev_priv->channel; - struct drm_bo_mem_reg *old_mem = &bo->mem; - uint32_t srch, dsth, page_count; - - /* Can happen during init/takedown */ - if (!dchan->chan) - return -EINVAL; - - srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; - dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; - if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { - dchan->m2mf_dma_source = srch; - dchan->m2mf_dma_destin = dsth; - - BEGIN_RING(NvSubM2MF, - NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); - OUT_RING (dchan->m2mf_dma_source); - OUT_RING (dchan->m2mf_dma_destin); - } - - page_count = new_mem->num_pages; - while (page_count) { - int line_count = (page_count > 2047) ? 2047 : page_count; - - BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); - OUT_RING (old_mem->mm_node->start << PAGE_SHIFT); - OUT_RING (new_mem->mm_node->start << PAGE_SHIFT); - OUT_RING (PAGE_SIZE); /* src_pitch */ - OUT_RING (PAGE_SIZE); /* dst_pitch */ - OUT_RING (PAGE_SIZE); /* line_length */ - OUT_RING (line_count); - OUT_RING ((1<<8)|(1<<0)); - OUT_RING (0); - BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); - OUT_RING (0); - - page_count -= line_count; - } - - return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, - DRM_FENCE_TYPE_EXE, 0, new_mem); -} - -static int -nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, - struct drm_bo_mem_reg *new_mem) -{ - struct drm_bo_mem_reg *old_mem = &bo->mem; - - if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { - if (old_mem->mem_type == DRM_BO_MEM_LOCAL) - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); -#if 0 - if (!nouveau_bo_move_flipd(bo, evict, no_wait, new_mem)) -#endif - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - else - if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { -#if 0 - if (nouveau_bo_move_flips(bo, evict, no_wait, new_mem)) -#endif - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - else { -// if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - return 0; -} - -static void -nouveau_bo_flush_ttm(struct drm_ttm *ttm) -{ -} - -static uint32_t nouveau_mem_prios[] = { - DRM_BO_MEM_PRIV0, - DRM_BO_MEM_VRAM, - DRM_BO_MEM_TT, - DRM_BO_MEM_LOCAL -}; -static uint32_t nouveau_busy_prios[] = { - DRM_BO_MEM_TT, - DRM_BO_MEM_PRIV0, - DRM_BO_MEM_VRAM, - DRM_BO_MEM_LOCAL -}; - -struct drm_bo_driver nouveau_bo_driver = { - .mem_type_prio = nouveau_mem_prios, - .mem_busy_prio = nouveau_busy_prios, - .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), - .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), - .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, - .fence_type = nouveau_bo_fence_type, - .invalidate_caches = nouveau_bo_invalidate_caches, - .init_mem_type = nouveau_bo_init_mem_type, - .evict_mask = nouveau_bo_evict_mask, - .move = nouveau_bo_move, - .ttm_cache_flush= nouveau_bo_flush_ttm -}; -- cgit v1.2.3 From 7246a33dd104903bc9227628270712ea9e6168d8 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 14 Nov 2007 04:05:48 +1100 Subject: nouveau: store user control reg offsets in channel struct --- linux-core/nouveau_fence.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 
'linux-core') diff --git a/linux-core/nouveau_fence.c b/linux-core/nouveau_fence.c index b3e81a89..4e624a7a 100644 --- a/linux-core/nouveau_fence.c +++ b/linux-core/nouveau_fence.c @@ -79,6 +79,7 @@ nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_fence_class_manager *fc = &dev->fm.fence_class[class]; + struct nouveau_channel *chan = dev_priv->fifos[class]; uint32_t pending_types = 0; DRM_DEBUG("class=%d\n", class); @@ -89,7 +90,7 @@ nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class) fc->pending_flush); if (pending_types) { - uint32_t sequence = NV_READ(NV03_FIFO_REGS(class) + 0x48); + uint32_t sequence = NV_READ(chan->ref_cnt); DRM_DEBUG("got 0x%08x\n", sequence); drm_fence_handler(dev, class, sequence, pending_types, 0); -- cgit v1.2.3 From 7e4bb6099a492b90374565aa574ba65f19ae2ab2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 14 Nov 2007 05:11:11 +1100 Subject: Revert "nouveau: stub superioctl" This reverts commit 2370ded79b4176d76cda1ec5f495fd33c2d566ed. Err.. didn't mean for that to slip in :) --- linux-core/Makefile.kernel | 2 +- linux-core/nouveau_bo.c | 265 -------------------------------------------- linux-core/nouveau_buffer.c | 256 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 257 insertions(+), 266 deletions(-) delete mode 100644 linux-core/nouveau_bo.c create mode 100644 linux-core/nouveau_buffer.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 92e1eb7e..e7c280d0 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -23,7 +23,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o i915_compat.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ - nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \ + nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/nouveau_bo.c b/linux-core/nouveau_bo.c deleted file mode 100644 index f0b0576c..00000000 --- a/linux-core/nouveau_bo.c +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright 2007 Dave Airlied - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ -/* - * Authors: Dave Airlied - * Ben Skeggs - * Jeremy Kolb - */ - -#include "drmP.h" -#include "nouveau_drm.h" -#include "nouveau_drv.h" -#include "nouveau_dma.h" - -static struct drm_ttm_backend * -nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - switch (dev_priv->gart_info.type) { - case NOUVEAU_GART_AGP: - return drm_agp_init_ttm(dev); - case NOUVEAU_GART_SGDMA: - return nouveau_sgdma_init_ttm(dev); - default: - DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); - break; - } - - return NULL; -} - -static int -nouveau_bo_fence_type(struct drm_buffer_object *bo, - uint32_t *fclass, uint32_t *type) -{ - /* When we get called, *fclass is set to the requested fence class */ - - if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) - *type = 3; - else - *type = 1; - return 0; - -} - -static int -nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) -{ - /* We'll do this from user space. */ - return 0; -} - -static int -nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, - struct drm_mem_type_manager *man) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - switch (type) { - case DRM_BO_MEM_LOCAL: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CACHED; - man->drm_bus_maptype = 0; - break; - case DRM_BO_MEM_VRAM: - man->flags = _DRM_FLAG_MEMTYPE_FIXED | - _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_NEEDS_IOREMAP; - man->io_addr = NULL; - man->drm_bus_maptype = _DRM_FRAME_BUFFER; - man->io_offset = drm_get_resource_start(dev, 1); - man->io_size = drm_get_resource_len(dev, 1); - if (man->io_size > nouveau_mem_fb_amount(dev)) - man->io_size = nouveau_mem_fb_amount(dev); - break; - case DRM_BO_MEM_PRIV0: - /* Unmappable VRAM */ - man->flags = _DRM_FLAG_MEMTYPE_CMA; - man->drm_bus_maptype = 0; - break; - case DRM_BO_MEM_TT: - switch (dev_priv->gart_info.type) { - case NOUVEAU_GART_AGP: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CSELECT | - _DRM_FLAG_NEEDS_IOREMAP; - man->drm_bus_maptype = _DRM_AGP; - break; - case NOUVEAU_GART_SGDMA: - man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | - _DRM_FLAG_MEMTYPE_CSELECT | - _DRM_FLAG_MEMTYPE_CMA; - man->drm_bus_maptype = _DRM_SCATTER_GATHER; - break; - default: - DRM_ERROR("Unknown GART type: %d\n", - dev_priv->gart_info.type); - return -EINVAL; - } - - man->io_offset = dev_priv->gart_info.aper_base; - man->io_size = dev_priv->gart_info.aper_size; - man->io_addr = NULL; - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); - return -EINVAL; - } - return 0; -} - -static uint32_t -nouveau_bo_evict_mask(struct drm_buffer_object *bo) -{ - switch (bo->mem.mem_type) { - case DRM_BO_MEM_LOCAL: - case DRM_BO_MEM_TT: - return DRM_BO_FLAG_MEM_LOCAL; - default: - return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; - } - return 0; -} - -/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access - * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. - */ -static int -nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, - struct drm_bo_mem_reg *new_mem) -{ - struct drm_device *dev = bo->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_drm_channel *dchan = &dev_priv->channel; - struct drm_bo_mem_reg *old_mem = &bo->mem; - uint32_t srch, dsth, page_count; - - /* Can happen during init/takedown */ - if (!dchan->chan) - return -EINVAL; - - srch = old_mem->mem_type == DRM_BO_MEM_TT ? 
NvDmaTT : NvDmaFB; - dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; - if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { - dchan->m2mf_dma_source = srch; - dchan->m2mf_dma_destin = dsth; - - BEGIN_RING(NvSubM2MF, - NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); - OUT_RING (dchan->m2mf_dma_source); - OUT_RING (dchan->m2mf_dma_destin); - } - - page_count = new_mem->num_pages; - while (page_count) { - int line_count = (page_count > 2047) ? 2047 : page_count; - - BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); - OUT_RING (old_mem->mm_node->start << PAGE_SHIFT); - OUT_RING (new_mem->mm_node->start << PAGE_SHIFT); - OUT_RING (PAGE_SIZE); /* src_pitch */ - OUT_RING (PAGE_SIZE); /* dst_pitch */ - OUT_RING (PAGE_SIZE); /* line_length */ - OUT_RING (line_count); - OUT_RING ((1<<8)|(1<<0)); - OUT_RING (0); - BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); - OUT_RING (0); - - page_count -= line_count; - } - - return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, - DRM_FENCE_TYPE_EXE, 0, new_mem); -} - -static int -nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, - struct drm_bo_mem_reg *new_mem) -{ - struct drm_bo_mem_reg *old_mem = &bo->mem; - - if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { - if (old_mem->mem_type == DRM_BO_MEM_LOCAL) - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); -#if 0 - if (!nouveau_bo_move_flipd(bo, evict, no_wait, new_mem)) -#endif - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - else - if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { -#if 0 - if (nouveau_bo_move_flips(bo, evict, no_wait, new_mem)) -#endif - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - else { -// if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) - return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); - } - return 0; -} - -static void -nouveau_bo_flush_ttm(struct drm_ttm *ttm) -{ -} - -static uint32_t nouveau_mem_prios[] = { - DRM_BO_MEM_PRIV0, - DRM_BO_MEM_VRAM, - DRM_BO_MEM_TT, - DRM_BO_MEM_LOCAL -}; -static uint32_t nouveau_busy_prios[] = { - DRM_BO_MEM_TT, - DRM_BO_MEM_PRIV0, - DRM_BO_MEM_VRAM, - DRM_BO_MEM_LOCAL -}; - -struct drm_bo_driver nouveau_bo_driver = { - .mem_type_prio = nouveau_mem_prios, - .mem_busy_prio = nouveau_busy_prios, - .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), - .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), - .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, - .fence_type = nouveau_bo_fence_type, - .invalidate_caches = nouveau_bo_invalidate_caches, - .init_mem_type = nouveau_bo_init_mem_type, - .evict_mask = nouveau_bo_evict_mask, - .move = nouveau_bo_move, - .ttm_cache_flush= nouveau_bo_flush_ttm -}; - -int -nouveau_bo_validate(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - DRM_ERROR("unimplemented\n"); - return -EINVAL; -} - diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c new file mode 100644 index 00000000..c40dff6b --- /dev/null +++ b/linux-core/nouveau_buffer.c @@ -0,0 +1,256 @@ +/* + * Copyright 2007 Dave Airlied + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +/* + * Authors: Dave Airlied + * Ben Skeggs + * Jeremy Kolb + */ + +#include "drmP.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" + +static struct drm_ttm_backend * +nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + switch (dev_priv->gart_info.type) { + case NOUVEAU_GART_AGP: + return drm_agp_init_ttm(dev); + case NOUVEAU_GART_SGDMA: + return nouveau_sgdma_init_ttm(dev); + default: + DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); + break; + } + + return NULL; +} + +static int +nouveau_bo_fence_type(struct drm_buffer_object *bo, + uint32_t *fclass, uint32_t *type) +{ + /* When we get called, *fclass is set to the requested fence class */ + + if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) + *type = 3; + else + *type = 1; + return 0; + +} + +static int +nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) +{ + /* We'll do this from user space. 
*/ + return 0; +} + +static int +nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, + struct drm_mem_type_manager *man) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + switch (type) { + case DRM_BO_MEM_LOCAL: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED; + man->drm_bus_maptype = 0; + break; + case DRM_BO_MEM_VRAM: + man->flags = _DRM_FLAG_MEMTYPE_FIXED | + _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_NEEDS_IOREMAP; + man->io_addr = NULL; + man->drm_bus_maptype = _DRM_FRAME_BUFFER; + man->io_offset = drm_get_resource_start(dev, 1); + man->io_size = drm_get_resource_len(dev, 1); + if (man->io_size > nouveau_mem_fb_amount(dev)) + man->io_size = nouveau_mem_fb_amount(dev); + break; + case DRM_BO_MEM_PRIV0: + /* Unmappable VRAM */ + man->flags = _DRM_FLAG_MEMTYPE_CMA; + man->drm_bus_maptype = 0; + break; + case DRM_BO_MEM_TT: + switch (dev_priv->gart_info.type) { + case NOUVEAU_GART_AGP: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CSELECT | + _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; + break; + case NOUVEAU_GART_SGDMA: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CSELECT | + _DRM_FLAG_MEMTYPE_CMA; + man->drm_bus_maptype = _DRM_SCATTER_GATHER; + break; + default: + DRM_ERROR("Unknown GART type: %d\n", + dev_priv->gart_info.type); + return -EINVAL; + } + + man->io_offset = dev_priv->gart_info.aper_base; + man->io_size = dev_priv->gart_info.aper_size; + man->io_addr = NULL; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +static uint32_t +nouveau_bo_evict_mask(struct drm_buffer_object *bo) +{ + switch (bo->mem.mem_type) { + case DRM_BO_MEM_LOCAL: + case DRM_BO_MEM_TT: + return DRM_BO_FLAG_MEM_LOCAL; + default: + return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; + } + return 0; +} + +/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access + * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. + */ +static int +nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, + struct drm_bo_mem_reg *new_mem) +{ + struct drm_device *dev = bo->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_drm_channel *dchan = &dev_priv->channel; + struct drm_bo_mem_reg *old_mem = &bo->mem; + uint32_t srch, dsth, page_count; + + /* Can happen during init/takedown */ + if (!dchan->chan) + return -EINVAL; + + srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; + dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; + if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { + dchan->m2mf_dma_source = srch; + dchan->m2mf_dma_destin = dsth; + + BEGIN_RING(NvSubM2MF, + NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); + OUT_RING (dchan->m2mf_dma_source); + OUT_RING (dchan->m2mf_dma_destin); + } + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 2047) ? 
2047 : page_count; + + BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); + OUT_RING (old_mem->mm_node->start << PAGE_SHIFT); + OUT_RING (new_mem->mm_node->start << PAGE_SHIFT); + OUT_RING (PAGE_SIZE); /* src_pitch */ + OUT_RING (PAGE_SIZE); /* dst_pitch */ + OUT_RING (PAGE_SIZE); /* line_length */ + OUT_RING (line_count); + OUT_RING ((1<<8)|(1<<0)); + OUT_RING (0); + BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + OUT_RING (0); + + page_count -= line_count; + } + + return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, + DRM_FENCE_TYPE_EXE, 0, new_mem); +} + +static int +nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, + struct drm_bo_mem_reg *new_mem) +{ + struct drm_bo_mem_reg *old_mem = &bo->mem; + + if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); +#if 0 + if (!nouveau_bo_move_flipd(bo, evict, no_wait, new_mem)) +#endif + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + else + if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { +#if 0 + if (nouveau_bo_move_flips(bo, evict, no_wait, new_mem)) +#endif + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + else { +// if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) + return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); + } + return 0; +} + +static void +nouveau_bo_flush_ttm(struct drm_ttm *ttm) +{ +} + +static uint32_t nouveau_mem_prios[] = { + DRM_BO_MEM_PRIV0, + DRM_BO_MEM_VRAM, + DRM_BO_MEM_TT, + DRM_BO_MEM_LOCAL +}; +static uint32_t nouveau_busy_prios[] = { + DRM_BO_MEM_TT, + DRM_BO_MEM_PRIV0, + DRM_BO_MEM_VRAM, + DRM_BO_MEM_LOCAL +}; + +struct drm_bo_driver nouveau_bo_driver = { + .mem_type_prio = nouveau_mem_prios, + .mem_busy_prio = nouveau_busy_prios, + .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), + .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), + .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, + .fence_type = nouveau_bo_fence_type, + .invalidate_caches = nouveau_bo_invalidate_caches, + .init_mem_type = nouveau_bo_init_mem_type, + .evict_mask = nouveau_bo_evict_mask, + .move = nouveau_bo_move, + .ttm_cache_flush= nouveau_bo_flush_ttm +}; -- cgit v1.2.3 From 68cdcda1eaf02353f2ef2d637c6bf1003c849185 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Wed, 14 Nov 2007 14:28:34 -0500 Subject: Add new shared header file drm_internal.h. This header file is shared across linux and bsd, but is not installed for user space to access. It's the place to put prototypes and data types that aren't platform or chipset specific, but still internal to the drm. --- linux-core/drmP.h | 10 +--------- linux-core/drm_internal.h | 1 + 2 files changed, 2 insertions(+), 9 deletions(-) create mode 120000 linux-core/drm_internal.h (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 759d2575..356131eb 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -83,6 +83,7 @@ #include "drm_os_linux.h" #include "drm_hashtab.h" +#include "drm_internal.h" struct drm_file; @@ -586,15 +587,6 @@ struct drm_vbl_sig { struct task_struct *task; }; -/** - * Drawable information. 
- */ -struct drm_drawable_info { - unsigned int num_rects; - struct drm_clip_rect *rects; -}; - - /* location of GART table */ #define DRM_ATI_GART_MAIN 1 #define DRM_ATI_GART_FB 2 diff --git a/linux-core/drm_internal.h b/linux-core/drm_internal.h new file mode 120000 index 00000000..b30ef94a --- /dev/null +++ b/linux-core/drm_internal.h @@ -0,0 +1 @@ +../shared-core/drm_internal.h \ No newline at end of file -- cgit v1.2.3 From 2eee33ace5b647153a7cf20990efd12313cc8472 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 15 Nov 2007 13:29:55 +1100 Subject: intel: add flushing for i8xx chipsets. Add a nut vs hammer style chipset flush for the i8xx chipsets - reenable TTM code paths --- linux-core/i915_compat.c | 126 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 96 insertions(+), 30 deletions(-) (limited to 'linux-core') diff --git a/linux-core/i915_compat.c b/linux-core/i915_compat.c index b09cc9f2..e8890f67 100644 --- a/linux-core/i915_compat.c +++ b/linux-core/i915_compat.c @@ -19,10 +19,15 @@ #define I915_IFPADDR 0x60 #define I965_IFPADDR 0x70 -static struct _intel_private_compat { +static struct _i9xx_private_compat { void __iomem *flush_page; struct resource ifp_resource; -} intel_private; +} i9xx_private; + +static struct _i8xx_private_compat { + void *flush_page; + struct page *page; +} i8xx_private; static void intel_compat_align_resource(void *data, struct resource *res, @@ -35,7 +40,7 @@ intel_compat_align_resource(void *data, struct resource *res, static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev) { int ret; - ret = pci_bus_alloc_resource(pdev->bus, &intel_private.ifp_resource, PAGE_SIZE, + ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE, PAGE_SIZE, PCIBIOS_MIN_MEM, 0, intel_compat_align_resource, pdev); if (ret != 0) @@ -53,15 +58,15 @@ static void intel_i915_setup_chipset_flush(struct pci_dev *pdev) if (!(temp & 0x1)) { intel_alloc_chipset_flush_resource(pdev); - pci_write_config_dword(pdev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); + pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1); } else { temp &= ~1; - intel_private.ifp_resource.start = temp; - intel_private.ifp_resource.end = temp + PAGE_SIZE; - ret = request_resource(&iomem_resource, &intel_private.ifp_resource); + i9xx_private.ifp_resource.start = temp; + i9xx_private.ifp_resource.end = temp + PAGE_SIZE; + ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource); if (ret) { - intel_private.ifp_resource.start = 0; + i9xx_private.ifp_resource.start = 0; printk("Failed inserting resource into tree\n"); } } @@ -79,34 +84,72 @@ static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev) intel_alloc_chipset_flush_resource(pdev); - pci_write_config_dword(pdev, I965_IFPADDR + 4, (intel_private.ifp_resource.start >> 32)); - pci_write_config_dword(pdev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1); + pci_write_config_dword(pdev, I965_IFPADDR + 4, (i9xx_private.ifp_resource.start >> 32)); + pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1); } else { u64 l64; temp_lo &= ~0x1; l64 = ((u64)temp_hi << 32) | temp_lo; - intel_private.ifp_resource.start = l64; - intel_private.ifp_resource.end = l64 + PAGE_SIZE; - ret = request_resource(&iomem_resource, &intel_private.ifp_resource); + i9xx_private.ifp_resource.start = l64; + i9xx_private.ifp_resource.end = l64 + PAGE_SIZE; + ret = 
request_resource(&iomem_resource, &i9xx_private.ifp_resource); if (!ret) { - intel_private.ifp_resource.start = 0; + i9xx_private.ifp_resource.start = 0; printk("Failed inserting resource into tree\n"); } } } -void intel_init_chipset_flush_compat(struct drm_device *dev) +static void intel_i8xx_fini_flush(struct drm_device *dev) { - struct pci_dev *agp_dev = dev->agp->agp_info.device; + kunmap(i8xx_private.page); + i8xx_private.flush_page = NULL; + unmap_page_from_agp(i8xx_private.page); + flush_agp_mappings(); - /* not flush on i8xx */ - if (!IS_I9XX(dev)) + __free_page(i8xx_private.page); +} + +static void intel_i8xx_setup_flush(struct drm_device *dev) +{ + + i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); + if (!i8xx_private.page) { return; + } - intel_private.ifp_resource.name = "GMCH IFPBAR"; - intel_private.ifp_resource.flags = IORESOURCE_MEM; + /* make page uncached */ + map_page_into_agp(i8xx_private.page); + flush_agp_mappings(); + + i8xx_private.flush_page = kmap(i8xx_private.page); + if (!i8xx_private.flush_page) + intel_i8xx_fini_flush(dev); + + DRM_ERROR("i8xx got flush page %p %p\n", i8xx_private.page, i8xx_private.flush_page); +} + + +static void intel_i8xx_flush_page(struct drm_device *dev) +{ + unsigned int *pg = i8xx_private.flush_page; + int i; + + /* HAI NUT CAN I HAZ HAMMER?? */ + for (i = 0; i < 256; i++) + *(pg + i) = i; + + DRM_MEMORYBARRIER(); +} + +static void intel_i9xx_setup_flush(struct drm_device *dev) +{ + struct pci_dev *agp_dev = dev->agp->agp_info.device; + + i9xx_private.ifp_resource.name = "GMCH IFPBAR"; + i9xx_private.ifp_resource.flags = IORESOURCE_MEM; /* Setup chipset flush for 915 */ if (IS_I965G(dev) || IS_G33(dev)) { @@ -115,26 +158,49 @@ void intel_init_chipset_flush_compat(struct drm_device *dev) intel_i915_setup_chipset_flush(agp_dev); } - if (intel_private.ifp_resource.start) { - intel_private.flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); - if (!intel_private.flush_page) + if (i9xx_private.ifp_resource.start) { + i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE); + if (!i9xx_private.flush_page) printk("unable to ioremap flush page - no chipset flushing"); } } -void intel_fini_chipset_flush_compat(struct drm_device *dev) +static void intel_i9xx_fini_flush(struct drm_device *dev) +{ + iounmap(i9xx_private.flush_page); + release_resource(&i9xx_private.ifp_resource); +} + +static void intel_i9xx_flush_page(struct drm_device *dev) +{ + if (i9xx_private.flush_page) + writel(1, i9xx_private.flush_page); +} + +void intel_init_chipset_flush_compat(struct drm_device *dev) { /* not flush on i8xx */ - if (!IS_I9XX(dev)) - return; + if (IS_I9XX(dev)) + intel_i9xx_setup_flush(dev); + else + intel_i8xx_setup_flush(dev); + +} - iounmap(intel_private.flush_page); - release_resource(&intel_private.ifp_resource); +void intel_fini_chipset_flush_compat(struct drm_device *dev) +{ + /* not flush on i8xx */ + if (IS_I9XX(dev)) + intel_i9xx_fini_flush(dev); + else + intel_i8xx_fini_flush(dev); } void drm_agp_chipset_flush(struct drm_device *dev) { - if (intel_private.flush_page) - writel(1, intel_private.flush_page); + if (IS_I9XX(dev)) + intel_i9xx_flush_page(dev); + else + intel_i8xx_flush_page(dev); } #endif -- cgit v1.2.3 From 62cdc6dbb3545d21bc3a68987d0781f277ae6ee4 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 15 Nov 2007 14:43:23 +1100 Subject: i915: remove excess debug output --- linux-core/i915_compat.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'linux-core') 
diff --git a/linux-core/i915_compat.c b/linux-core/i915_compat.c index e8890f67..e119a992 100644 --- a/linux-core/i915_compat.c +++ b/linux-core/i915_compat.c @@ -127,8 +127,6 @@ static void intel_i8xx_setup_flush(struct drm_device *dev) i8xx_private.flush_page = kmap(i8xx_private.page); if (!i8xx_private.flush_page) intel_i8xx_fini_flush(dev); - - DRM_ERROR("i8xx got flush page %p %p\n", i8xx_private.page, i8xx_private.flush_page); } -- cgit v1.2.3
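Several patches in this series (the drm_sysfs change at the top, and the i915_compat chipset-flush code) have to cope with kernel API differences across releases. The usual pattern is a compile-time version guard like the sketch below. This is only an illustration of that pattern, not the actual DRM code: the callback names are placeholders, and the struct class member signatures are an assumption based on 2.6.22-era kernels.

#include <linux/version.h>
#include <linux/device.h>

/* Placeholder PM callbacks; signatures assume the 2.6.22-era struct class. */
static int example_suspend(struct device *dev, pm_message_t state)
{
	return 0;
}

static int example_resume(struct device *dev)
{
	return 0;
}

static void example_hook_class_pm(struct class *class)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	/* Older kernels lack these class-level hooks, so guard at compile time. */
	class->suspend = example_suspend;
	class->resume  = example_resume;
#endif
}

The same KERNEL_VERSION() comparison can wrap any field or call that only exists past a given kernel release, which is why an out-of-tree tree like linux-core carries guards of this form.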