summaryrefslogtreecommitdiff
path: root/shared-core/nouveau_mem.c
diff options
context:
space:
mode:
authorBen Skeggs <skeggsb@gmail.com>2008-03-12 23:37:29 +1100
committerBen Skeggs <skeggsb@gmail.com>2008-03-13 00:23:52 +1100
commit1766e1c07b03c6ccf545469663334be762c0bddf (patch)
treee60ee35ad5f843a882a1f4b65700ab66eea76c60 /shared-core/nouveau_mem.c
parent88bd1e4a350d011ec44f6786e0bfdf8fb386800c (diff)
nv50: force channel vram access through vm
If we ever want to be able to use the 3D engine, we have no choice. It appears that the tiling setup (required for 3D on G8x) is in the page tables. The immediate benefit of this change, however, is that it's now not possible for a client to use the GPU to render over the top of important engine setup tables, which also live in VRAM. G8x VRAM size is limited to 512MiB at the moment, as we use a 1:1 mapping of real VRAM pages to their offset within the start of a channel's VRAM DMA object and only populate a single PDE for VRAM use.
Diffstat (limited to 'shared-core/nouveau_mem.c')
-rw-r--r--  shared-core/nouveau_mem.c | 78
1 files changed, 78 insertions, 0 deletions
diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c
index 4e80ca46..2cf8807d 100644
--- a/shared-core/nouveau_mem.c
+++ b/shared-core/nouveau_mem.c
@@ -468,6 +468,11 @@ int nouveau_mem_init(struct drm_device *dev)
/* Init FB */
dev_priv->fb_phys=drm_get_resource_start(dev,1);
fb_size = nouveau_mem_fb_amount(dev);
+ /* On G80, limit VRAM to 512MiB temporarily due to limits in how
+ * we handle VRAM page tables.
+ */
+ if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024))
+ fb_size = (512 * 1024 * 1024);
/* On at least NV40, RAMIN is actually at the end of vram.
* We don't want to allocate this... */
if (dev_priv->card_type >= NV_40)
@@ -540,6 +545,21 @@ int nouveau_mem_init(struct drm_device *dev)
}
}
+ /* G8x: Allocate shared page table to map real VRAM pages into */
+ if (dev_priv->card_type >= NV_50) {
+ unsigned size = ((512 * 1024 * 1024) / 65536) * 8;
+
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ALLOW_NO_REFS,
+ &dev_priv->vm_vram_pt);
+ if (ret) {
+ DRM_ERROR("Error creating VRAM page table: %d\n", ret);
+ return ret;
+ }
+ }
+
+
return 0;
}
@@ -558,6 +578,12 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
if (alignment < PAGE_SHIFT)
alignment = PAGE_SHIFT;
+ /* Align allocation sizes to 64KiB blocks on G8x. We use a 64KiB
+ * page size in the GPU VM.
+ */
+ if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50)
+ size = (size + (64 * 1024)) & ~((64 * 1024) - 1);
+
/*
* Warn about 0 sized allocations, but let it go through. It'll return 1 page
*/
@@ -612,6 +638,30 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
alloc_ok:
block->flags=type;
+ /* On G8x, map memory into VM */
+ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
+ !(flags & NOUVEAU_MEM_NOVM)) {
+ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
+ unsigned offset = block->start;
+ unsigned count = block->size / 65536;
+
+ if (!pt) {
+ DRM_ERROR("vm alloc without vm pt\n");
+ nouveau_mem_free_block(block);
+ return NULL;
+ }
+
+ while (count--) {
+ unsigned pte = offset / 65536;
+
+ INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
+ INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000);
+ offset += 65536;
+ }
+ } else {
+ block->flags |= NOUVEAU_MEM_NOVM;
+ }
+
if (flags&NOUVEAU_MEM_MAPPED)
{
struct drm_map_list *entry;
@@ -653,9 +703,34 @@ alloc_ok:
void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);
+
if (block->flags&NOUVEAU_MEM_MAPPED)
drm_rmmap(dev, block->map);
+
+ /* G8x: Remove pages from vm */
+ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
+ !(block->flags & NOUVEAU_MEM_NOVM)) {
+ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
+ unsigned offset = block->start;
+ unsigned count = block->size / 65536;
+
+ if (!pt) {
+ DRM_ERROR("vm free without vm pt\n");
+ goto out_free;
+ }
+
+ while (count--) {
+ unsigned pte = offset / 65536;
+ INSTANCE_WR(pt, (pte * 2) + 0, 0);
+ INSTANCE_WR(pt, (pte * 2) + 1, 0);
+ offset += 65536;
+ }
+ }
+
+out_free:
nouveau_mem_free_block(block);
}
@@ -670,6 +745,9 @@ int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ if (alloc->flags & NOUVEAU_MEM_INTERNAL)
+ return -EINVAL;
+
block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
alloc->flags, file_priv);
if (!block)