From 4327d7f3142cdbf3f3f94426ae33e2d30b5a40c8 Mon Sep 17 00:00:00 2001 From: Maurice van der Pot Date: Mon, 4 Jun 2007 10:49:30 +1000 Subject: nouveau: fix RAMHT wrapping --- shared-core/nouveau_object.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'shared-core/nouveau_object.c') diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index ace7c2aa..e36568c6 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -167,7 +167,7 @@ nouveau_ht_object_insert(drm_device_t* dev, int channel, uint32_t handle, while (NV_READ(ht_base + ofs) || NV_READ(ht_base + ofs + 4)) { ofs += 8; - if (ofs == ht_end) ofs = ht_base; + if (ofs == dev_priv->ramht_size) ofs = 0; if (ofs == o_ofs) { DRM_ERROR("no free hash table entries\n"); return 1; -- cgit v1.2.3 From 695599f18d907bb277805581bbe208b0e083e7d9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 19:03:35 +1000 Subject: nouveau: Nuke DMA_OBJECT_INIT ioctl (bumps interface to 0.0.7) For various reasons, this ioctl was a bad idea. At channel creation we now automatically create DMA objects covering available VRAM and GART memory, where the client used to do this themselves. However, there is still a need to be able to create DMA objects pointing at specific areas of memory (ie. notifiers). Each channel is now allocated a small amount of memory from which a client can suballocate things (such as notifiers), and have a DMA object created which covers the suballocated area. The NOTIFIER_ALLOC ioctl exposes this functionality. --- shared-core/nouveau_object.c | 143 ++++++++++++++----------------------------- 1 file changed, 47 insertions(+), 96 deletions(-) (limited to 'shared-core/nouveau_object.c') diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index e36568c6..e7528e23 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -153,13 +153,13 @@ nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle) return hash << 3; } -static int +int nouveau_ht_object_insert(drm_device_t* dev, int channel, uint32_t handle, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; int ht_base = NV_RAMIN + dev_priv->ramht_offset; - int ht_end = ht_base + dev_priv->ramht_size; +/* int ht_end = ht_base + dev_priv->ramht_size; */ int o_ofs, ofs; obj->handle = handle; @@ -461,115 +461,70 @@ nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj) drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); } -void nouveau_object_cleanup(drm_device_t *dev, int channel) -{ - drm_nouveau_private_t *dev_priv=dev->dev_private; - - while (dev_priv->fifos[channel].objs) { - nouveau_object_free(dev, dev_priv->fifos[channel].objs); - } -} - -int nouveau_ioctl_object_init(DRM_IOCTL_ARGS) +int +nouveau_object_init_channel(drm_device_t *dev, int channel, + uint32_t vram_handle, + uint32_t tt_handle) { - DRM_DEVICE; - drm_nouveau_object_init_t init; - struct nouveau_object *obj; - - DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_object_init_t __user *) - data, sizeof(init)); - - if (!nouveau_fifo_owner(dev, filp, init.channel)) { - DRM_ERROR("pid %d doesn't own channel %d\n", - DRM_CURRENTPID, init.channel); - return DRM_ERR(EINVAL); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_object *gpuobj; + int ret; + + /* VRAM ctxdma */ + gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->fb_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_VIDMEM); + if (!gpuobj) { + 
DRM_ERROR("Error creating VRAM ctxdma: %d\n", DRM_ERR(ENOMEM)); + return DRM_ERR(ENOMEM); } - //FIXME: check args, only allow trusted objects to be created - - if (nouveau_object_handle_find(dev, init.channel, init.handle)) { - DRM_ERROR("Channel %d: handle 0x%08x already exists\n", - init.channel, init.handle); - return DRM_ERR(EINVAL); + ret = nouveau_ht_object_insert(dev, channel, vram_handle, gpuobj); + if (ret) { + DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); + return ret; } - obj = nouveau_object_gr_create(dev, init.channel, init.class); - if (!obj) + /* non-AGP unimplemented */ + if (dev_priv->agp_heap == NULL) + return 0; + + /* GART ctxdma */ + gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->agp_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_AGP); + if (!gpuobj) { + DRM_ERROR("Error creating TT ctxdma: %d\n", DRM_ERR(ENOMEM)); return DRM_ERR(ENOMEM); + } - if (nouveau_ht_object_insert(dev, init.channel, init.handle, obj)) { - nouveau_object_free(dev, obj); - return DRM_ERR(ENOMEM); + ret = nouveau_ht_object_insert(dev, channel, tt_handle, gpuobj); + if (ret) { + DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); + return ret; } return 0; } -static int -nouveau_dma_object_check_access(drm_device_t *dev, - drm_nouveau_dma_object_init_t *init) +void nouveau_object_cleanup(drm_device_t *dev, int channel) { - drm_nouveau_private_t *dev_priv = dev->dev_private; - uint64_t limit; - - /* Check for known DMA object classes */ - switch (init->class) { - case NV_CLASS_DMA_IN_MEMORY: - case NV_CLASS_DMA_FROM_MEMORY: - case NV_CLASS_DMA_TO_MEMORY: - break; - default: - DRM_ERROR("invalid class = 0x%x\n", init->class); - return DRM_ERR(EPERM); - } - - /* Check access mode, and translate to NV_DMA_ACCESS_* */ - switch (init->access) { - case NOUVEAU_MEM_ACCESS_RO: - init->access = NV_DMA_ACCESS_RO; - break; - case NOUVEAU_MEM_ACCESS_WO: - init->access = NV_DMA_ACCESS_WO; - break; - case NOUVEAU_MEM_ACCESS_RW: - init->access = NV_DMA_ACCESS_RW; - break; - default: - DRM_ERROR("invalid access mode = %d\n", init->access); - return DRM_ERR(EPERM); - } - - /* Check that request is within the allowed limits of "target" */ - switch (init->target) { - case NOUVEAU_MEM_FB: - limit = dev_priv->fb_available_size; - init->target = NV_DMA_TARGET_VIDMEM; - break; - case NOUVEAU_MEM_AGP: - limit = dev_priv->agp_available_size; - init->target = NV_DMA_TARGET_AGP; - break; - default: - DRM_ERROR("invalid target = 0x%x\n", init->target); - return DRM_ERR(EPERM); - } + drm_nouveau_private_t *dev_priv=dev->dev_private; - if ((init->offset > limit) || (init->offset + init->size) > limit) { - DRM_ERROR("access out of allowed range (%d,0x%08x,0x%08x)\n", - init->target, init->offset, init->size); - return DRM_ERR(EPERM); + while (dev_priv->fifos[channel].objs) { + nouveau_object_free(dev, dev_priv->fifos[channel].objs); } - - return 0; } -int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS) +int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_nouveau_dma_object_init_t init; + drm_nouveau_grobj_alloc_t init; struct nouveau_object *obj; - DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_dma_object_init_t __user *) + DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_grobj_alloc_t __user *) data, sizeof(init)); if (!nouveau_fifo_owner(dev, filp, init.channel)) { @@ -578,8 +533,7 @@ int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); } - if (nouveau_dma_object_check_access(dev, &init)) - return DRM_ERR(EPERM); + //FIXME: check args, only allow 
trusted objects to be created if (nouveau_object_handle_find(dev, init.channel, init.handle)) { DRM_ERROR("Channel %d: handle 0x%08x already exists\n", @@ -587,13 +541,10 @@ int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); } - obj = nouveau_object_dma_create(dev, init.channel, init.class, - init.offset, init.size, - init.access, init.target); + obj = nouveau_object_gr_create(dev, init.channel, init.class); if (!obj) return DRM_ERR(ENOMEM); - obj->handle = init.handle; if (nouveau_ht_object_insert(dev, init.channel, init.handle, obj)) { nouveau_object_free(dev, obj); return DRM_ERR(ENOMEM); -- cgit v1.2.3 From 68ecf61647e9ec16d59cc8f50550d11478eb3118 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Jun 2007 15:42:55 +1000 Subject: nouveau: never touch PRAMIN with NV_WRITE, cleanup RAMHT code a bit --- shared-core/nouveau_object.c | 133 ++++++++++++++++++++++++++----------------- 1 file changed, 80 insertions(+), 53 deletions(-) (limited to 'shared-core/nouveau_object.c') diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index e7528e23..dac08df4 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -139,7 +139,7 @@ nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle) is given as: */ static uint32_t -nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle) +nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle) { drm_nouveau_private_t *dev_priv=dev->dev_private; uint32_t hash = 0; @@ -153,63 +153,90 @@ nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle) return hash << 3; } +static int +nouveau_ramht_entry_valid(drm_device_t *dev, uint32_t ramht, uint32_t offset) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + uint32_t ctx = NV_RI32(ramht + offset + 4); + + if (dev_priv->card_type < NV_40) + return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); + return (ctx != 0); +} + int -nouveau_ht_object_insert(drm_device_t* dev, int channel, uint32_t handle, - struct nouveau_object *obj) +nouveau_ramht_insert(drm_device_t* dev, int channel, uint32_t handle, + struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv=dev->dev_private; - int ht_base = NV_RAMIN + dev_priv->ramht_offset; -/* int ht_end = ht_base + dev_priv->ramht_size; */ - int o_ofs, ofs; - - obj->handle = handle; - o_ofs = ofs = nouveau_ht_handle_hash(dev, channel, obj->handle); - - while (NV_READ(ht_base + ofs) || NV_READ(ht_base + ofs + 4)) { - ofs += 8; - if (ofs == dev_priv->ramht_size) ofs = 0; - if (ofs == o_ofs) { - DRM_ERROR("no free hash table entries\n"); - return 1; - } + uint32_t ramht = dev_priv->ramht_offset; + uint32_t ctx, co, ho; + uint32_t inst; + + inst = nouveau_chip_instance_get(dev, obj->instance); + if (dev_priv->card_type < NV_40) { + ctx = NV_RAMHT_CONTEXT_VALID | inst | + (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); + } else + if (dev_priv->card_type < NV_50) { + ctx = inst | + (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } else { + ctx = inst | + (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); } - ofs += ht_base; - - DRM_DEBUG("Channel %d - Handle 0x%08x at 0x%08x\n", - channel, obj->handle, ofs); - - NV_WRITE(NV_RAMHT_HANDLE_OFFSET + ofs, obj->handle); - if (dev_priv->card_type >= NV_40) - NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, - (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT) | - 
nouveau_chip_instance_get(dev, obj->instance) - ); - else - NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, - NV_RAMHT_CONTEXT_VALID | - (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT) | - nouveau_chip_instance_get(dev, obj->instance) - ); - - obj->ht_loc = ofs; - return 0; + + co = ho = nouveau_ramht_hash_handle(dev, channel, handle); + do { + if (!nouveau_ramht_entry_valid(dev, ramht, co)) { + DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + channel, co, handle, ctx); + NV_WI32(ramht + co + 0, handle); + NV_WI32(ramht + co + 4, ctx); + obj->handle = handle; + return 0; + } + DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", + channel, co, NV_RI32(ramht + co)); + + co += 8; + if (co == dev_priv->ramht_size) + co = 0; + } while (co != ho); + + DRM_ERROR("RAMHT space exhausted. ch=%d\n", channel); + return DRM_ERR(ENOMEM); } -static void nouveau_hash_table_remove(drm_device_t* dev, - struct nouveau_object *obj) +static void +nouveau_ramht_remove(drm_device_t* dev, struct nouveau_object *obj) { drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t ramht = dev_priv->ramht_offset; + uint32_t co, ho; + + co = ho = nouveau_ramht_hash_handle(dev, obj->channel, obj->handle); + do { + if (nouveau_ramht_entry_valid(dev, ramht, co) && + (obj->handle == NV_RI32(ramht + co))) { + DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", + obj->channel, co, obj->handle, + NV_RI32(ramht + co + 4)); + NV_WI32(ramht + co + 0, 0x00000000); + NV_WI32(ramht + co + 4, 0x00000000); + obj->handle = ~0; + return; + } - DRM_DEBUG("Remove handle 0x%08x at 0x%08x from HT\n", - obj->handle, obj->ht_loc); - if (obj->ht_loc) { - DRM_DEBUG("... HT entry was: 0x%08x/0x%08x\n", - NV_READ(obj->ht_loc), NV_READ(obj->ht_loc+4)); - NV_WRITE(obj->ht_loc , 0x00000000); - NV_WRITE(obj->ht_loc+4, 0x00000000); - } + co += 8; + if (co == dev_priv->ramht_size) + co = 0; + } while (co != ho); + + DRM_ERROR("RAMHT entry not found. 
ch=%d, handle=0x%08x\n", + obj->channel, obj->handle); } static struct nouveau_object * @@ -457,7 +484,7 @@ nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj) { nouveau_object_instance_free(dev, obj); if (obj->handle != ~0) - nouveau_hash_table_remove(dev, obj); + nouveau_ramht_remove(dev, obj); drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); } @@ -480,7 +507,7 @@ nouveau_object_init_channel(drm_device_t *dev, int channel, return DRM_ERR(ENOMEM); } - ret = nouveau_ht_object_insert(dev, channel, vram_handle, gpuobj); + ret = nouveau_ramht_insert(dev, channel, vram_handle, gpuobj); if (ret) { DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret); return ret; @@ -500,7 +527,7 @@ nouveau_object_init_channel(drm_device_t *dev, int channel, return DRM_ERR(ENOMEM); } - ret = nouveau_ht_object_insert(dev, channel, tt_handle, gpuobj); + ret = nouveau_ramht_insert(dev, channel, tt_handle, gpuobj); if (ret) { DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); return ret; @@ -545,7 +572,7 @@ int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) if (!obj) return DRM_ERR(ENOMEM); - if (nouveau_ht_object_insert(dev, init.channel, init.handle, obj)) { + if (nouveau_ramht_insert(dev, init.channel, init.handle, obj)) { nouveau_object_free(dev, obj); return DRM_ERR(ENOMEM); } -- cgit v1.2.3 From 163f8526123ffa38783fc911b5f7a19debce7f73 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 2 Jul 2007 19:31:18 +1000 Subject: nouveau: rewrite gpu object code Allows multiple references to a single object, needed to support PCI(E)GART scatter-gather DMA objects which would quickly fill PRAMIN if each channel had its own. Handle per-channel private instmem areas. This is needed to support NV50, but might be something we want to do on earlier chipsets at some point? Everything that touches PRAMIN is a GPU object. --- shared-core/nouveau_object.c | 827 +++++++++++++++++++++++++++++-------------- 1 file changed, 559 insertions(+), 268 deletions(-) (limited to 'shared-core/nouveau_object.c') diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index dac08df4..79875ca1 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -35,79 +35,6 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" -/* TODO - * - Check object class, deny unsafe objects (add card-specific versioning?) - * - Get rid of DMA object creation, this should be wrapped by MM routines. 
- */ - -/* Translate a RAMIN offset into a value the card understands, will be useful - * in the future when we can access more instance ram which isn't mapped into - * the PRAMIN aperture - */ -uint32_t -nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem) -{ - uint32_t inst = (uint32_t)mem->start >> 4; - DRM_DEBUG("****** on-chip instance for 0x%016llx = 0x%08x\n", - mem->start, inst); - return inst; -} - -static void -nouveau_object_link(drm_device_t *dev, struct nouveau_object *obj) -{ - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel]; - - if (!chan->objs) { - chan->objs = obj; - return; - } - - obj->prev = NULL; - obj->next = chan->objs; - - chan->objs->prev = obj; - chan->objs = obj; -} - -static void -nouveau_object_unlink(drm_device_t *dev, struct nouveau_object *obj) -{ - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel]; - - if (obj->prev == NULL) { - if (obj->next) - obj->next->prev = NULL; - chan->objs = obj->next; - } else if (obj->next == NULL) { - if (obj->prev) - obj->prev->next = NULL; - } else { - obj->prev->next = obj->next; - obj->next->prev = obj->prev; - } -} - -static struct nouveau_object * -nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle) -{ - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; - struct nouveau_object *obj = chan->objs; - - DRM_DEBUG("Looking for handle 0x%08x\n", handle); - while (obj) { - if (obj->handle == handle) - return obj; - obj = obj->next; - } - - DRM_DEBUG("...couldn't find handle\n"); - return NULL; -} - /* NVidia uses context objects to drive drawing operations. Context objects can be selected into 8 subchannels in the FIFO, @@ -150,146 +77,439 @@ nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle) handle >>= dev_priv->ramht_bits; } hash ^= channel << (dev_priv->ramht_bits - 4); - return hash << 3; + hash <<= 3; + + DRM_DEBUG("ch%d handle=0x%08x hash=0x%08x\n", channel, handle, hash); + return hash; } static int -nouveau_ramht_entry_valid(drm_device_t *dev, uint32_t ramht, uint32_t offset) +nouveau_ramht_entry_valid(drm_device_t *dev, nouveau_gpuobj_t *ramht, + uint32_t offset) { drm_nouveau_private_t *dev_priv=dev->dev_private; - uint32_t ctx = NV_RI32(ramht + offset + 4); + uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4); if (dev_priv->card_type < NV_40) return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); return (ctx != 0); } -int -nouveau_ramht_insert(drm_device_t* dev, int channel, uint32_t handle, - struct nouveau_object *obj) +static int +nouveau_ramht_insert(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) { drm_nouveau_private_t *dev_priv=dev->dev_private; - uint32_t ramht = dev_priv->ramht_offset; + struct nouveau_fifo *chan = &dev_priv->fifos[ref->channel]; + nouveau_gpuobj_t *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; + nouveau_gpuobj_t *gpuobj = ref->gpuobj; uint32_t ctx, co, ho; - uint32_t inst; - inst = nouveau_chip_instance_get(dev, obj->instance); + if (!ramht) { + DRM_ERROR("No hash table!\n"); + return DRM_ERR(EINVAL); + } + if (dev_priv->card_type < NV_40) { - ctx = NV_RAMHT_CONTEXT_VALID | inst | - (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); + ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | + (ref->channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); } else if (dev_priv->card_type < NV_50) { - ctx = inst | - (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | - (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + ctx = (ref->instance >> 4) | + (ref->channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); } else { - ctx = inst | - (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + ctx = (ref->instance >> 4) | + (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); } - co = ho = nouveau_ramht_hash_handle(dev, channel, handle); + co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle); do { if (!nouveau_ramht_entry_valid(dev, ramht, co)) { DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - channel, co, handle, ctx); - NV_WI32(ramht + co + 0, handle); - NV_WI32(ramht + co + 4, ctx); - obj->handle = handle; + ref->channel, co, ref->handle, ctx); + INSTANCE_WR(ramht, (co + 0)/4, ref->handle); + INSTANCE_WR(ramht, (co + 4)/4, ctx); return 0; } DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n", - channel, co, NV_RI32(ramht + co)); + ref->channel, co, INSTANCE_RD(ramht, co/4)); co += 8; - if (co == dev_priv->ramht_size) + if (co >= dev_priv->ramht_size) co = 0; } while (co != ho); - DRM_ERROR("RAMHT space exhausted. ch=%d\n", channel); + DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel); return DRM_ERR(ENOMEM); } static void -nouveau_ramht_remove(drm_device_t* dev, struct nouveau_object *obj) +nouveau_ramht_remove(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) { drm_nouveau_private_t *dev_priv = dev->dev_private; - uint32_t ramht = dev_priv->ramht_offset; + struct nouveau_fifo *chan = &dev_priv->fifos[ref->channel]; + nouveau_gpuobj_t *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; uint32_t co, ho; - co = ho = nouveau_ramht_hash_handle(dev, obj->channel, obj->handle); + if (!ramht) { + DRM_ERROR("No hash table!\n"); + return; + } + + co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle); do { if (nouveau_ramht_entry_valid(dev, ramht, co) && - (obj->handle == NV_RI32(ramht + co))) { + (ref->handle == INSTANCE_RD(ramht, (co/4)))) { DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", - obj->channel, co, obj->handle, - NV_RI32(ramht + co + 4)); - NV_WI32(ramht + co + 0, 0x00000000); - NV_WI32(ramht + co + 4, 0x00000000); - obj->handle = ~0; + ref->channel, co, ref->handle, + INSTANCE_RD(ramht, (co + 4))); + INSTANCE_WR(ramht, (co + 0)/4, 0x00000000); + INSTANCE_WR(ramht, (co + 4)/4, 0x00000000); return; } co += 8; - if (co == dev_priv->ramht_size) + if (co >= dev_priv->ramht_size) co = 0; } while (co != ho); DRM_ERROR("RAMHT entry not found. 
ch=%d, handle=0x%08x\n",
-		  obj->channel, obj->handle);
+		  ref->channel, ref->handle);
 }

-static struct nouveau_object *
-nouveau_object_instance_alloc(drm_device_t* dev, int channel)
+int
+nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align,
+		   uint32_t flags, nouveau_gpuobj_t **gpuobj_ret)
 {
-	drm_nouveau_private_t *dev_priv=dev->dev_private;
-	struct nouveau_object *obj;
+	drm_nouveau_private_t *dev_priv = dev->dev_private;
+	struct nouveau_fifo *chan = NULL;
+	nouveau_gpuobj_t *gpuobj;
+	struct mem_block *pramin = NULL;
+
+	DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n",
+		  channel, size, align, flags);

-	/* Create object struct */
-	obj = drm_calloc(1, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
-	if (!obj) {
-		DRM_ERROR("couldn't alloc memory for object\n");
-		return NULL;
+	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
+		return DRM_ERR(EINVAL);
+
+	if (channel >= 0) {
+		if (channel > nouveau_fifo_number(dev))
+			return DRM_ERR(EINVAL);
+		chan = &dev_priv->fifos[channel];
 	}

-	/* Allocate instance memory */
-	obj->instance = nouveau_instmem_alloc(dev,
-		(dev_priv->card_type >= NV_40 ? 32 : 16), 4);
-	if (!obj->instance) {
-		DRM_ERROR("couldn't alloc RAMIN for object\n");
-		drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
-		return NULL;
+	gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
+	if (!gpuobj)
+		return DRM_ERR(ENOMEM);
+	DRM_DEBUG("gpuobj %p\n", gpuobj);
+	gpuobj->flags = flags;
+	gpuobj->im_channel = channel;
+
+	/* Choose between global instmem heap, and per-channel private
+	 * instmem heap.  On <NV50 allow requests for private instmem
+	 * to be satisfied from the global heap if no per-channel area
+	 * is available.
+	 */
+	if (chan) {
+		if (chan->ramin_heap) {
+			DRM_DEBUG("private heap\n");
+			pramin = chan->ramin_heap;
+		} else
+		if (dev_priv->card_type < NV_50) {
+			DRM_DEBUG("global heap fallback\n");
+			pramin = dev_priv->ramin_heap;
+		}
+	} else {
+		DRM_DEBUG("global heap\n");
+		pramin = dev_priv->ramin_heap;
+	}
+
+	if (!pramin) {
+		DRM_ERROR("No PRAMIN heap!\n");
+		return DRM_ERR(EINVAL);
 	}

-	/* Bind object to channel */
-	obj->channel = channel;
-	obj->handle = ~0;
-	nouveau_object_link(dev, obj);
+	/* Allocate a chunk of the PRAMIN aperture */
+	gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
+						    drm_order(align),
+						    (DRMFILE)-2);
+	if (!gpuobj->im_pramin) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return DRM_ERR(ENOMEM);
+	}
+	gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE;
+
+	/* On NV50 the PRAMIN aperture is paged.  When allocating from the
+	 * global instmem heap, alloc and bind VRAM pages into the PRAMIN
+	 * aperture.
+	 */
+	if (!chan && dev_priv->card_type >= NV_50) {
+		DRM_ERROR("back aperture with vram pages\n");
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return DRM_ERR(EINVAL);
+	}

-	return obj;
+	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+		int i;
+
+		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+			INSTANCE_WR(gpuobj, i/4, 0);
+	}
+
+	if (dev_priv->gpuobj_all) {
+		gpuobj->next = dev_priv->gpuobj_all;
+		gpuobj->next->prev = gpuobj;
+	}
+	dev_priv->gpuobj_all = gpuobj;
+
+	*gpuobj_ret = gpuobj;
+	return 0;
 }

-static void
-nouveau_object_instance_free(drm_device_t *dev, struct nouveau_object *obj)
+void nouveau_gpuobj_takedown(drm_device_t *dev)
 {
-	drm_nouveau_private_t *dev_priv=dev->dev_private;
+	drm_nouveau_private_t *dev_priv = dev->dev_private;
+	nouveau_gpuobj_t *gpuobj = NULL;
+
+	DRM_DEBUG("\n");
+
+	while ((gpuobj = dev_priv->gpuobj_all)) {
+		DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n",
+			  gpuobj, gpuobj->refcount);
+		gpuobj->refcount = 0;
+		nouveau_gpuobj_del(dev, &gpuobj);
+	}
+}
+
+int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj)
+{
+	drm_nouveau_private_t *dev_priv = dev->dev_private;
+	nouveau_gpuobj_t *gpuobj;
+
+	DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
+
+	if (!dev_priv || !pgpuobj || !(*pgpuobj))
+		return DRM_ERR(EINVAL);
+	gpuobj = *pgpuobj;
+
+	if (gpuobj->refcount != 0) {
+		DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount);
+		return DRM_ERR(EINVAL);
+	}
+
+	if (gpuobj->im_pramin) {
+		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
+			drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin),
+				 DRM_MEM_DRIVER);
+		else
+			nouveau_mem_free_block(gpuobj->im_pramin);
+	}
+
+	if (gpuobj->im_backing)
+		nouveau_mem_free(dev, gpuobj->im_backing);
+
+	if (gpuobj->next)
+		gpuobj->next->prev = gpuobj->prev;
+	if (gpuobj->prev)
+		gpuobj->prev->next = gpuobj->next;
+	else
+		dev_priv->gpuobj_all = gpuobj->next;
+
+	*pgpuobj = NULL;
+	drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER);
+	return 0;
+}
+
+static int
+nouveau_gpuobj_instance_get(drm_device_t *dev, int channel,
+			    nouveau_gpuobj_t *gpuobj, uint32_t *inst)
+{
+	drm_nouveau_private_t *dev_priv = dev->dev_private;
+	nouveau_gpuobj_t *cpramin;
+
+	if ((channel > 0) && gpuobj->im_channel != channel) {
+		DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
+			  gpuobj->im_channel, channel);
+		return DRM_ERR(EINVAL);
+	}
+
+	if (dev_priv->card_type < NV_50) {
+		*inst = gpuobj->im_pramin->start;
+		return 0;
+	}
+
+	/* NV50 channel-local instance */
+	if (channel > 0) {
+		cpramin = dev_priv->fifos[channel].ramin->gpuobj;
+		*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
+		return 0;
+	}
+
+	/* NV50 global (VRAM) instance */
+	if (gpuobj->im_channel < 0) {
+		/* ...from global heap */
+		if (!gpuobj->im_backing) {
+			DRM_ERROR("AII, no VRAM backing gpuobj\n");
+			return DRM_ERR(EINVAL);
+		}
+		*inst = gpuobj->im_backing->start - dev_priv->fb_phys;
+		return 0;
+	} else {
+		/* ...from local heap */
+		cpramin = dev_priv->fifos[gpuobj->im_channel].ramin->gpuobj;
+		*inst = (cpramin->im_backing->start - dev_priv->fb_phys) +
+			(gpuobj->im_pramin->start - cpramin->im_pramin->start);
+		return 0;
+	}
+
+	return DRM_ERR(EINVAL);
+}
+
+int
+nouveau_gpuobj_ref_add(drm_device_t *dev, int channel, uint32_t handle,
+		       nouveau_gpuobj_t *gpuobj, nouveau_gpuobj_ref_t **ref_ret)
+{
+	drm_nouveau_private_t *dev_priv = dev->dev_private;
+	struct nouveau_fifo *chan = NULL;
+	nouveau_gpuobj_ref_t *ref;
+	uint32_t instance;
+	int ret;
+
+	DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n", channel, handle, gpuobj);
+
+	if (!dev_priv || !gpuobj || (ref_ret && 
*ref_ret != NULL)) + return DRM_ERR(EINVAL); + + if (channel >= 0) { + if (channel > nouveau_fifo_number(dev)) + return DRM_ERR(EINVAL); + chan = &dev_priv->fifos[channel]; + } else + if (!ref_ret) + return DRM_ERR(EINVAL); + + ret = nouveau_gpuobj_instance_get(dev, channel, gpuobj, &instance); + if (ret) + return ret; + + ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER); + if (!ref) + return DRM_ERR(ENOMEM); + ref->gpuobj = gpuobj; + ref->channel = channel; + ref->instance = instance; + + if (!ref_ret) { + ref->handle = handle; + + ret = nouveau_ramht_insert(dev, ref); + if (ret) { + drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER); + return ret; + } + + ref->next = chan->ramht_refs; + chan->ramht_refs = ref; + } else { + ref->handle = ~0; + *ref_ret = ref; + } + + ref->gpuobj->refcount++; + return 0; +} + +int nouveau_gpuobj_ref_del(drm_device_t *dev, nouveau_gpuobj_ref_t **pref) +{ + nouveau_gpuobj_ref_t *ref; + + DRM_DEBUG("ref %p\n", pref ? *pref : NULL); + + if (!dev || !pref || *pref == NULL) + return DRM_ERR(EINVAL); + ref = *pref; + + if (ref->handle != ~0) + nouveau_ramht_remove(dev, ref); + + if (ref->gpuobj) { + ref->gpuobj->refcount--; + + if (ref->gpuobj->refcount == 0) { + if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) + nouveau_gpuobj_del(dev, &ref->gpuobj); + } + } + + *pref = NULL; + drm_free(ref, sizeof(ref), DRM_MEM_DRIVER); + return 0; +} + +int +nouveau_gpuobj_new_ref(drm_device_t *dev, int oc, int rc, uint32_t handle, + int size, int align, uint32_t flags, + nouveau_gpuobj_ref_t **ref) +{ + nouveau_gpuobj_t *gpuobj = NULL; + int ret; + + if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj))) + return ret; + + if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) { + nouveau_gpuobj_del(dev, &gpuobj); + return ret; + } + + return 0; +} + +int +nouveau_gpuobj_new_fake(drm_device_t *dev, uint32_t offset, uint32_t size, + uint32_t flags, nouveau_gpuobj_t **pgpuobj, + nouveau_gpuobj_ref_t **pref) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_gpuobj_t *gpuobj = NULL; int i; - /* Unbind object from channel */ - nouveau_object_unlink(dev, obj); + DRM_DEBUG("offset=0x%08x size=0x%08x flags=0x%08x\n", + offset, size, flags); + + gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); + if (!gpuobj) + return DRM_ERR(ENOMEM); + DRM_DEBUG("gpuobj %p\n", gpuobj); + gpuobj->im_channel = -1; + gpuobj->flags = flags | NVOBJ_FLAG_FAKE; + + gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block), + DRM_MEM_DRIVER); + if (!gpuobj->im_pramin) { + nouveau_gpuobj_del(dev, &gpuobj); + return DRM_ERR(ENOMEM); + } + gpuobj->im_pramin->start = offset; + gpuobj->im_pramin->size = size; + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { + for (i = 0; i < gpuobj->im_pramin->size; i += 4) + INSTANCE_WR(gpuobj, i/4, 0); + } - /* Clean RAMIN entry */ - DRM_DEBUG("Instance entry for 0x%08x" - "(engine %d, class 0x%x) before destroy:\n", - obj->handle, obj->engine, obj->class); - for (i=0; i<(obj->instance->size/4); i++) { - DRM_DEBUG(" +0x%02x: 0x%08x\n", (i*4), - INSTANCE_RD(obj->instance, i)); - INSTANCE_WR(obj->instance, i, 0x00000000); + if (pref) { + if ((i = nouveau_gpuobj_ref_add(dev, -1, 0, gpuobj, pref))) { + nouveau_gpuobj_del(dev, &gpuobj); + return i; + } } - /* Free RAMIN */ - nouveau_instmem_free(dev, obj->instance); + if (pgpuobj) + *pgpuobj = gpuobj; + return 0; } /* @@ -317,64 +537,70 @@ nouveau_object_instance_free(drm_device_t *dev, struct nouveau_object *obj) to it that can be used to set up context objects. 
*/ -struct nouveau_object * -nouveau_object_dma_create(drm_device_t* dev, int channel, int class, - uint32_t offset, uint32_t size, - int access, int target) +static int +nouveau_gpuobj_class_instmem_size(drm_device_t *dev, int class) { - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_object *obj; - uint32_t frame, adjust; - uint32_t pte_flags = 0; - - DRM_DEBUG("offset:0x%08x, size:0x%08x, target:%d, access:%d\n", - offset, size, target, access); + drm_nouveau_private_t *dev_priv = dev->dev_private; - switch (target) { - case NV_DMA_TARGET_AGP: - offset += dev_priv->agp_phys; - break; - default: - break; - } + /*XXX: dodgy hack for now */ + if (dev_priv->card_type >= NV_50) + return 24; + if (dev_priv->card_type >= NV_40) + return 32; + return 16; +} - switch (access) { - case NV_DMA_ACCESS_RO: - break; - case NV_DMA_ACCESS_WO: - case NV_DMA_ACCESS_RW: - pte_flags |= (1 << 1); - break; - default: - DRM_ERROR("invalid access mode=%d\n", access); - return NULL; - } +int +nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, + uint64_t offset, uint64_t size, int access, int target, + nouveau_gpuobj_t **gpuobj) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int ret; - frame = offset & ~0x00000FFF; - adjust = offset & 0x00000FFF; + DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", + channel, class, offset, size); + DRM_DEBUG("access=%d target=%d\n", access, target); - obj = nouveau_object_instance_alloc(dev, channel); - if (!obj) { - DRM_ERROR("couldn't allocate DMA object\n"); - return obj; + ret = nouveau_gpuobj_new(dev, channel, + nouveau_gpuobj_class_instmem_size(dev, class), + 16, + NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, + gpuobj); + if (ret) { + DRM_ERROR("Error creating gpuobj: %d\n", ret); + return ret; } - obj->engine = 0; - obj->class = class; - - INSTANCE_WR(obj->instance, 0, ((1<<12) | (1<<13) | - (adjust << 20) | - (access << 14) | - (target << 16) | - class)); - INSTANCE_WR(obj->instance, 1, size-1); - INSTANCE_WR(obj->instance, 2, frame | pte_flags); - INSTANCE_WR(obj->instance, 3, frame | pte_flags); + if (dev_priv->card_type < NV_50) { + uint32_t frame, adjust, pte_flags = 0; + + if (target == NV_DMA_TARGET_AGP) + offset += dev_priv->agp_phys; + if (access != NV_DMA_ACCESS_RO) + pte_flags |= (1<<1); + frame = offset & ~0x00000fff; + adjust = offset & 0x00000fff; + + INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) | + (adjust << 20) | + (access << 14) | + (target << 16) | + class)); + INSTANCE_WR(*gpuobj, 1, size - 1); + INSTANCE_WR(*gpuobj, 2, frame | pte_flags); + INSTANCE_WR(*gpuobj, 3, frame | pte_flags); + } else { + nouveau_gpuobj_del(dev, gpuobj); + DRM_ERROR("stub\n"); + return DRM_ERR(EINVAL); + } - return obj; + (*gpuobj)->engine = NVOBJ_ENGINE_SW; + (*gpuobj)->class = class; + return 0; } - /* Context objects in the instance RAM have the following structure. * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. @@ -426,89 +652,142 @@ nouveau_object_dma_create(drm_device_t* dev, int channel, int class, entry[5]: set to 0? 
*/ -struct nouveau_object * -nouveau_object_gr_create(drm_device_t* dev, int channel, int class) +int +nouveau_gpuobj_gr_new(drm_device_t *dev, int channel, int class, + nouveau_gpuobj_t **gpuobj) { - drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_object *obj; + drm_nouveau_private_t *dev_priv = dev->dev_private; + int ret; - DRM_DEBUG("class=%x\n", class); + DRM_DEBUG("ch%d class=0x%04x\n", channel, class); - obj = nouveau_object_instance_alloc(dev, channel); - if (!obj) { - DRM_ERROR("couldn't allocate context object\n"); - return obj; + ret = nouveau_gpuobj_new(dev, channel, + nouveau_gpuobj_class_instmem_size(dev, class), + 16, + NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, + gpuobj); + if (ret) { + DRM_ERROR("Error creating gpuobj: %d\n", ret); + return ret; } - obj->engine = 1; - obj->class = class; + if (dev_priv->card_type >= NV_50) { + nouveau_gpuobj_del(dev, gpuobj); + DRM_ERROR("stub!\n"); + return DRM_ERR(EINVAL); + } switch (class) { case NV_CLASS_NULL: - INSTANCE_WR(obj->instance, 0, 0x00001030); - INSTANCE_WR(obj->instance, 1, 0xFFFFFFFF); - INSTANCE_WR(obj->instance, 2, 0x00000000); - INSTANCE_WR(obj->instance, 2, 0x00000000); + INSTANCE_WR(*gpuobj, 0, 0x00001030); + INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF); break; default: if (dev_priv->card_type >= NV_40) { - INSTANCE_WR(obj->instance, 0, obj->class); - INSTANCE_WR(obj->instance, 1, 0x00000000); + INSTANCE_WR(*gpuobj, 0, class); #ifdef __BIG_ENDIAN - INSTANCE_WR(obj->instance, 2, 0x01000000); -#else - INSTANCE_WR(obj->instance, 2, 0x00000000); + INSTANCE_WR(*gpuobj, 2, 0x01000000); #endif - INSTANCE_WR(obj->instance, 3, 0x00000000); - INSTANCE_WR(obj->instance, 4, 0x00000000); - INSTANCE_WR(obj->instance, 5, 0x00000000); - INSTANCE_WR(obj->instance, 6, 0x00000000); - INSTANCE_WR(obj->instance, 7, 0x00000000); } else { #ifdef __BIG_ENDIAN - INSTANCE_WR(obj->instance, 0, obj->class | 0x00080000); + INSTANCE_WR(*gpuobj, 0, class | 0x00080000); #else - INSTANCE_WR(obj->instance, 0, obj->class); + INSTANCE_WR(*gpuobj, 0, class); #endif - INSTANCE_WR(obj->instance, 1, 0x00000000); - INSTANCE_WR(obj->instance, 2, 0x00000000); - INSTANCE_WR(obj->instance, 3, 0x00000000); } } - return obj; + (*gpuobj)->engine = NVOBJ_ENGINE_GR; + (*gpuobj)->class = class; + return 0; } -void -nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj) +static int +nouveau_gpuobj_channel_init_pramin(drm_device_t *dev, int channel) { - nouveau_object_instance_free(dev, obj); - if (obj->handle != ~0) - nouveau_ramht_remove(dev, obj); - drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + nouveau_gpuobj_t *pramin = NULL; + int size, base, ret; + + DRM_DEBUG("ch%d\n", channel); + + /* Base amount for object storage (4KiB enough?) 
*/
+	size = 0x1000;
+	base = 0;
+
+	/* PGRAPH context */
+
+	if (dev_priv->card_type == NV_50) {
+		/* RAMHT, RAMFC, PD, funny header thingo */
+	}
+
+	DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
+		  channel, size, base);
+	ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, size, 0x1000, 0,
+				     &chan->ramin);
+	if (ret) {
+		DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
+		return ret;
+	}
+	pramin = chan->ramin->gpuobj;
+
+	ret = nouveau_mem_init_heap(&chan->ramin_heap,
+				    pramin->im_pramin->start + base, size);
+	if (ret) {
+		DRM_ERROR("Error creating PRAMIN heap: %d\n", ret);
+		nouveau_gpuobj_ref_del(dev, &chan->ramin);
+		return ret;
+	}
+
+	return 0;
+}
+
 int
-nouveau_object_init_channel(drm_device_t *dev, int channel,
-			    uint32_t vram_handle,
-			    uint32_t tt_handle)
+nouveau_gpuobj_channel_init(drm_device_t *dev, int channel,
+			    uint32_t vram_h, uint32_t tt_h)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	struct nouveau_object *gpuobj;
+	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+	nouveau_gpuobj_t *vram = NULL, *tt = NULL;
 	int ret;

+	DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", channel, vram_h, tt_h);
+
+	/* Reserve a block of PRAMIN for the channel
+	 *XXX: maybe on <NV50 too at some point
+	 */
+	if (dev_priv->card_type == NV_50) {
+		ret = nouveau_gpuobj_channel_init_pramin(dev, channel);
+		if (ret)
+			return ret;
+	}
+
+	/* RAMHT */
+	if (dev_priv->card_type < NV_50) {
+		ret = nouveau_gpuobj_ref_add(dev, -1, 0, dev_priv->ramht,
+					     &chan->ramht);
+		if (ret)
+			return ret;
+	} else {
+		ret = nouveau_gpuobj_new_ref(dev, channel, channel, 0,
+					     0x8000, 16,
+					     NVOBJ_FLAG_ZERO_ALLOC,
+					     &chan->ramht);
+		if (ret)
+			return ret;
+	}
+
 	/* VRAM ctxdma */
-	gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY,
-					   0, dev_priv->fb_available_size,
-					   NV_DMA_ACCESS_RW,
-					   NV_DMA_TARGET_VIDMEM);
-	if (!gpuobj) {
-		DRM_ERROR("Error creating VRAM ctxdma: %d\n", DRM_ERR(ENOMEM));
-		return DRM_ERR(ENOMEM);
+	if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
+					  0, dev_priv->fb_available_size,
+					  NV_DMA_ACCESS_RW,
+					  NV_DMA_TARGET_VIDMEM, &vram))) {
+		DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
+		return ret;
 	}

-	ret = nouveau_ramht_insert(dev, channel, vram_handle, gpuobj);
-	if (ret) {
+	if ((ret = nouveau_gpuobj_ref_add(dev, channel, vram_h, vram, NULL))) {
 		DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret);
 		return ret;
 	}
@@ -518,17 +797,15 @@ nouveau_object_init_channel(drm_device_t *dev, int channel,
 	return 0;

 	/* GART ctxdma */
-	gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY,
-					   0, dev_priv->agp_available_size,
-					   NV_DMA_ACCESS_RW,
-					   NV_DMA_TARGET_AGP);
-	if (!gpuobj) {
-		DRM_ERROR("Error creating TT ctxdma: %d\n", DRM_ERR(ENOMEM));
-		return DRM_ERR(ENOMEM);
+	if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
+					  0, dev_priv->agp_available_size,
+					  NV_DMA_ACCESS_RW, NV_DMA_TARGET_AGP,
+					  &tt))) {
+		DRM_ERROR("Error creating TT ctxdma: %d\n", ret);
+		return ret;
 	}

-	ret = nouveau_ramht_insert(dev, channel, tt_handle, gpuobj);
-	if (ret) {
+	if ((ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL))) {
 		DRM_ERROR("Error referencing TT ctxdma: %d\n", ret);
 		return ret;
 	}
@@ -536,20 +813,34 @@ nouveau_object_init_channel(drm_device_t *dev, int channel,
 	return 0;
 }

-void nouveau_object_cleanup(drm_device_t *dev, int channel)
+void
+nouveau_gpuobj_channel_takedown(drm_device_t *dev, int channel)
 {
-	drm_nouveau_private_t *dev_priv=dev->dev_private;
+	drm_nouveau_private_t *dev_priv = dev->dev_private;
+	struct nouveau_fifo *chan = 
&dev_priv->fifos[channel]; + nouveau_gpuobj_ref_t *ref; + + DRM_DEBUG("ch%d\n", channel); - while (dev_priv->fifos[channel].objs) { - nouveau_object_free(dev, dev_priv->fifos[channel].objs); + while ((ref = chan->ramht_refs)) { + chan->ramht_refs = ref->next; + nouveau_gpuobj_ref_del(dev, &ref); } + nouveau_gpuobj_ref_del(dev, &chan->ramht); + + if (chan->ramin_heap) + nouveau_mem_takedown(&chan->ramin_heap); + if (chan->ramin) + nouveau_gpuobj_ref_del(dev, &chan->ramin); + } int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_nouveau_grobj_alloc_t init; - struct nouveau_object *obj; + nouveau_gpuobj_t *gr = NULL; + int ret; DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_grobj_alloc_t __user *) data, sizeof(init)); @@ -561,20 +852,20 @@ int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS) } //FIXME: check args, only allow trusted objects to be created + //FIXME: check for pre-existing handle - if (nouveau_object_handle_find(dev, init.channel, init.handle)) { - DRM_ERROR("Channel %d: handle 0x%08x already exists\n", - init.channel, init.handle); - return DRM_ERR(EINVAL); + if ((ret = nouveau_gpuobj_gr_new(dev, init.channel, init.class, &gr))) { + DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n", + ret, init.channel, init.handle); + return ret; } - obj = nouveau_object_gr_create(dev, init.channel, init.class); - if (!obj) - return DRM_ERR(ENOMEM); - - if (nouveau_ramht_insert(dev, init.channel, init.handle, obj)) { - nouveau_object_free(dev, obj); - return DRM_ERR(ENOMEM); + if ((ret = nouveau_gpuobj_ref_add(dev, init.channel, init.handle, + gr, NULL))) { + DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)", + ret, init.channel, init.handle); + nouveau_gpuobj_del(dev, &gr); + return ret; } return 0; -- cgit v1.2.3 From c806bba4665bb369168ee0b453fa28e2e0bf2a5d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 5 Jul 2007 00:12:33 +1000 Subject: nouveau/nv50: Initial channel/object support Should be OK on G84 for a single channel, multiple channels *almost* work. Untested on G80. --- shared-core/nouveau_object.c | 70 ++++++++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 29 deletions(-) (limited to 'shared-core/nouveau_object.c') diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index 79875ca1..a394ae6e 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -76,7 +76,8 @@ nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle) hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); handle >>= dev_priv->ramht_bits; } - hash ^= channel << (dev_priv->ramht_bits - 4); + if (dev_priv->card_type < NV_50) + hash ^= channel << (dev_priv->ramht_bits - 4); hash <<= 3; DRM_DEBUG("ch%d handle=0x%08x hash=0x%08x\n", channel, handle, hash); @@ -99,7 +100,7 @@ static int nouveau_ramht_insert(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) { drm_nouveau_private_t *dev_priv=dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[ref->channel]; + struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; nouveau_gpuobj_t *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; nouveau_gpuobj_t *gpuobj = ref->gpuobj; uint32_t ctx, co, ho; @@ -148,7 +149,7 @@ static void nouveau_ramht_remove(drm_device_t* dev, nouveau_gpuobj_ref_t *ref) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[ref->channel]; + struct nouveau_fifo *chan = dev_priv->fifos[ref->channel]; nouveau_gpuobj_t *ramht = chan->ramht ? 
chan->ramht->gpuobj : NULL; uint32_t co, ho; @@ -183,9 +184,11 @@ nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, uint32_t flags, nouveau_gpuobj_t **gpuobj_ret) { drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_engine_func_t *engine = &dev_priv->Engine; struct nouveau_fifo *chan = NULL; nouveau_gpuobj_t *gpuobj; struct mem_block *pramin = NULL; + int ret; DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n", channel, size, align, flags); @@ -196,7 +199,7 @@ nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, if (channel >= 0) { if (channel > nouveau_fifo_number(dev)) return DRM_ERR(EINVAL); - chan = &dev_priv->fifos[channel]; + chan = dev_priv->fifos[channel]; } gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER); @@ -230,6 +233,11 @@ nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, return DRM_ERR(EINVAL); } + if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) { + nouveau_gpuobj_del(dev, &gpuobj); + return ret; + } + /* Allocate a chunk of the PRAMIN aperture */ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, drm_order(align), @@ -240,14 +248,9 @@ nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align, } gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE; - /* On NV50 the PRAMIN aperture is paged. When allocating from the - * global instmem heap, alloc and bind VRAM pages into the PRAMIN - * aperture. - */ - if (!chan && dev_priv->card_type >= NV_50) { - DRM_ERROR("back aperture with vram pages\n"); + if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) { nouveau_gpuobj_del(dev, &gpuobj); - return DRM_ERR(EINVAL); + return ret; } if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { @@ -285,6 +288,7 @@ void nouveau_gpuobj_takedown(drm_device_t *dev) int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj) { drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_engine_func_t *engine = &dev_priv->Engine; nouveau_gpuobj_t *gpuobj; DRM_DEBUG("gpuobj %p\n", pgpuobj ? 
*pgpuobj : NULL); @@ -298,6 +302,8 @@ int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj) return DRM_ERR(EINVAL); } + engine->instmem.clear(dev, gpuobj); + if (gpuobj->im_pramin) { if (gpuobj->flags & NVOBJ_FLAG_FAKE) drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin), @@ -306,9 +312,6 @@ int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj) nouveau_mem_free_block(gpuobj->im_pramin); } - if (gpuobj->im_backing) - nouveau_mem_free(dev, gpuobj->im_backing); - if (gpuobj->next) gpuobj->next->prev = gpuobj->prev; if (gpuobj->prev) @@ -342,7 +345,7 @@ nouveau_gpuobj_instance_get(drm_device_t *dev, int channel, /* NV50 channel-local instance */ if (channel > 0) { - cpramin = dev_priv->fifos[channel].ramin->gpuobj; + cpramin = dev_priv->fifos[channel]->ramin->gpuobj; *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; return 0; } @@ -358,7 +361,7 @@ nouveau_gpuobj_instance_get(drm_device_t *dev, int channel, return 0; } else { /* ...from local heap */ - cpramin = dev_priv->fifos[gpuobj->im_channel].ramin->gpuobj; + cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj; *inst = (cpramin->im_backing->start - dev_priv->fb_phys) + (gpuobj->im_pramin->start - cpramin->im_pramin->start); return 0; @@ -385,7 +388,7 @@ nouveau_gpuobj_ref_add(drm_device_t *dev, int channel, uint32_t handle, if (channel >= 0) { if (channel > nouveau_fifo_number(dev)) return DRM_ERR(EINVAL); - chan = &dev_priv->fifos[channel]; + chan = dev_priv->fifos[channel]; } else if (!ref_ret) return DRM_ERR(EINVAL); @@ -591,9 +594,10 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, INSTANCE_WR(*gpuobj, 2, frame | pte_flags); INSTANCE_WR(*gpuobj, 3, frame | pte_flags); } else { - nouveau_gpuobj_del(dev, gpuobj); - DRM_ERROR("stub\n"); - return DRM_ERR(EINVAL); + INSTANCE_WR(*gpuobj, 0, 0x00190000 | class); + INSTANCE_WR(*gpuobj, 1, offset + size - 1); + INSTANCE_WR(*gpuobj, 2, offset); + INSTANCE_WR(*gpuobj, 5, 0x00010000); } (*gpuobj)->engine = NVOBJ_ENGINE_SW; @@ -672,11 +676,9 @@ nouveau_gpuobj_gr_new(drm_device_t *dev, int channel, int class, } if (dev_priv->card_type >= NV_50) { - nouveau_gpuobj_del(dev, gpuobj); - DRM_ERROR("stub!\n"); - return DRM_ERR(EINVAL); - } - + INSTANCE_WR(*gpuobj, 0, class); + INSTANCE_WR(*gpuobj, 5, 0x00010000); + } else { switch (class) { case NV_CLASS_NULL: INSTANCE_WR(*gpuobj, 0, 0x00001030); @@ -696,6 +698,7 @@ nouveau_gpuobj_gr_new(drm_device_t *dev, int channel, int class, #endif } } + } (*gpuobj)->engine = NVOBJ_ENGINE_GR; (*gpuobj)->class = class; @@ -706,7 +709,7 @@ static int nouveau_gpuobj_channel_init_pramin(drm_device_t *dev, int channel) { drm_nouveau_private_t *dev_priv = dev->dev_private; - struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + struct nouveau_fifo *chan = dev_priv->fifos[channel]; nouveau_gpuobj_t *pramin = NULL; int size, base, ret; @@ -719,7 +722,16 @@ nouveau_gpuobj_channel_init_pramin(drm_device_t *dev, int channel) /* PGRAPH context */ if (dev_priv->card_type == NV_50) { - /* RAMHT, RAMFC, PD, funny header thingo */ + /* Various fixed table thingos */ + size += 0x1400; /* mostly unknown stuff */ + size += 0x4000; /* vm pd */ + base = 0x6000; + /* RAMHT, not sure about setting size yet, 32KiB to be safe */ + size += 0x8000; + /* RAMFC */ + size += 0x1000; + /* PGRAPH context */ + size += 0x60000; } DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", @@ -748,7 +760,7 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, uint32_t vram_h, uint32_t tt_h) { 
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+	struct nouveau_fifo *chan = dev_priv->fifos[channel];
 	nouveau_gpuobj_t *vram = NULL, *tt = NULL;
 	int ret;
@@ -817,7 +829,7 @@ void
 nouveau_gpuobj_channel_takedown(drm_device_t *dev, int channel)
 {
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
-	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+	struct nouveau_fifo *chan = dev_priv->fifos[channel];
 	nouveau_gpuobj_ref_t *ref;

 	DRM_DEBUG("ch%d\n", channel);
-- cgit v1.2.3

From 31e33813e8c1b085683e68524e680882368e59a9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 9 Jul 2007 20:02:14 +1000 Subject: nouveau: Don't be so strict on <NV50 --- shared-core/nouveau_object.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'shared-core/nouveau_object.c') diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index a394ae6e..bf811b4b 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c
@@ nouveau_gpuobj_instance_get(drm_device_t *dev, int channel,
 	drm_nouveau_private_t *dev_priv = dev->dev_private;
 	nouveau_gpuobj_t *cpramin;

-	if ((channel > 0) && gpuobj->im_channel != channel) {
-		DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
-			  gpuobj->im_channel, channel);
-		return DRM_ERR(EINVAL);
-	}
-
 	if (dev_priv->card_type < NV_50) {
 		*inst = gpuobj->im_pramin->start;
 		return 0;
 	}

+	if ((channel > 0) && gpuobj->im_channel != channel) {
+		DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
+			  gpuobj->im_channel, channel);
+		return DRM_ERR(EINVAL);
+	}
+
 	/* NV50 channel-local instance */
 	if (channel > 0) {
 		cpramin = dev_priv->fifos[channel]->ramin->gpuobj;
-- cgit v1.2.3

From 694e1c5c3f768436651ddf95e11ab5a89ccc8ffa Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Wed, 11 Jul 2007 02:35:10 +0200 Subject: Added support for PCIGART for PCI(E) cards. Bumped DRM interface patchlevel. --- shared-core/nouveau_object.c | 161 ++++++++++++++++++++++++++++++------------- 1 file changed, 112 insertions(+), 49 deletions(-) (limited to 'shared-core/nouveau_object.c') diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index bf811b4b..dcb29b40 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c
@@ -515,6 +515,20 @@ nouveau_gpuobj_new_fake(drm_device_t *dev, uint32_t offset, uint32_t size,
 	return 0;
 }

+
+static int
+nouveau_gpuobj_class_instmem_size(drm_device_t *dev, int class)
+{
+	drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+	/*XXX: dodgy hack for now */
+	if (dev_priv->card_type >= NV_50)
+		return 24;
+	if (dev_priv->card_type >= NV_40)
+		return 32;
+	return 16;
+}
+
 /*
    DMA objects are used to reference a piece of memory in the
    framebuffer, PCI or AGP address space. Each object is 16 bytes big
    and looks as follows:

    entry[0]
      11:0  class (seems like I can always use 0 here)
      12    page table present?
      13    page entry linear?
      17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
      31:20 dma adjust (bits 0-11 of the address)
    entry[1]
-     dma limit
-   entry[2]
+     dma limit (size of transfer)
+   entry[X]
      1     0 readonly, 1 readwrite
-     31:12 dma frame address (bits 12-31 of the address)
+     31:12 dma frame address of the page (bits 12-31 of the address)
+   entry[N]
+     page table terminator, same value as the first pte, as does nvidia
+     rivatv uses 0xffffffff

-   Non linear page tables seem to need a list of frame addresses afterwards,
-   the rivatv project has some info on this.
+   Non linear page tables need a list of frame addresses afterwards,
+   the rivatv project has some info on this.

    The method below creates a DMA object in instance RAM and returns a
    handle to it that can be used to set up context objects. 
*/ - -static int -nouveau_gpuobj_class_instmem_size(drm_device_t *dev, int class) -{ - drm_nouveau_private_t *dev_priv = dev->dev_private; - - /*XXX: dodgy hack for now */ - if (dev_priv->card_type >= NV_50) - return 24; - if (dev_priv->card_type >= NV_40) - return 32; - return 16; -} - int nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, uint64_t offset, uint64_t size, int access, int target, @@ -560,13 +563,28 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, { drm_nouveau_private_t *dev_priv = dev->dev_private; int ret; - + uint32_t is_scatter_gather = 0; + DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n", channel, class, offset, size); DRM_DEBUG("access=%d target=%d\n", access, target); + switch (target) { + case NV_DMA_TARGET_AGP: + offset += dev_priv->agp_phys; + break; + case NV_DMA_TARGET_PCI_NONLINEAR: + /*assume the "offset" is a virtual memory address*/ + is_scatter_gather = 1; + /*put back the right value*/ + target = NV_DMA_TARGET_PCI; + break; + default: + break; + } + ret = nouveau_gpuobj_new(dev, channel, - nouveau_gpuobj_class_instmem_size(dev, class), + is_scatter_gather ? ((((size + PAGE_SIZE - 1) / PAGE_SIZE) << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class), 16, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, gpuobj); @@ -577,22 +595,53 @@ nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class, if (dev_priv->card_type < NV_50) { uint32_t frame, adjust, pte_flags = 0; - - if (target == NV_DMA_TARGET_AGP) - offset += dev_priv->agp_phys; - if (access != NV_DMA_ACCESS_RO) - pte_flags |= (1<<1); - frame = offset & ~0x00000fff; adjust = offset & 0x00000fff; - - INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) | - (adjust << 20) | + if (access != NV_DMA_ACCESS_RO) + pte_flags |= (1<<1); + + if ( ! 
is_scatter_gather ) + { + frame = offset & ~0x00000fff; + + INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) | + (adjust << 20) | (access << 14) | (target << 16) | class)); - INSTANCE_WR(*gpuobj, 1, size - 1); - INSTANCE_WR(*gpuobj, 2, frame | pte_flags); - INSTANCE_WR(*gpuobj, 3, frame | pte_flags); + INSTANCE_WR(*gpuobj, 1, size - 1); + INSTANCE_WR(*gpuobj, 2, frame | pte_flags); + INSTANCE_WR(*gpuobj, 3, frame | pte_flags); + } + else + { + uint32_t instance_offset; + uint32_t bus_addr; + size = (uint32_t) size; + + DRM_DEBUG("Creating PCI DMA object using virtual zone starting at 0x%08x, size %d\n", (uint32_t) offset, (uint32_t)size); + INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) | + (adjust << 20) | + (access << 14) | + (target << 16) | + class)); + INSTANCE_WR(*gpuobj, 1, size-1); + + /*write starting at the third dword*/ + instance_offset = 2; + + /*for each PAGE, get its bus address, fill in the page table entry, and advance*/ + while ( size > 0 ) { + bus_addr = (uint32_t) page_address(vmalloc_to_page((void *) (uint32_t) offset)); + bus_addr |= (offset & ~PAGE_MASK); + bus_addr = virt_to_bus((void *)bus_addr); + frame = bus_addr & ~0x00000FFF; + INSTANCE_WR(*gpuobj, instance_offset, frame | pte_flags); + offset += PAGE_SIZE; + instance_offset ++; + size -= PAGE_SIZE; + } + + } } else { INSTANCE_WR(*gpuobj, 0, 0x00190000 | class); INSTANCE_WR(*gpuobj, 1, offset + size - 1); @@ -804,24 +853,38 @@ nouveau_gpuobj_channel_init(drm_device_t *dev, int channel, return ret; } - /* non-AGP unimplemented */ - if (dev_priv->agp_heap == NULL) - return 0; - - /* GART ctxdma */ - if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->agp_available_size, - NV_DMA_ACCESS_RW, NV_DMA_TARGET_AGP, - &tt))) { - DRM_ERROR("Error creating TT ctxdma: %d\n", ret); - return ret; + if (dev_priv->agp_heap) { + /* AGPGART ctxdma */ + if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->agp_available_size, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_AGP, &tt))) { + DRM_ERROR("Error creating AGP TT ctxdma: %d\n", DRM_ERR(ENOMEM)); + return DRM_ERR(ENOMEM); + } + + ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); + if (ret) { + DRM_ERROR("Error referencing AGP TT ctxdma: %d\n", ret); + return ret; + } } - - if ((ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL))) { - DRM_ERROR("Error referencing TT ctxdma: %d\n", ret); - return ret; + else { + /*PCI*/ + if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY, + (unsigned int) dev->sg->virtual, dev->sg->pages * PAGE_SIZE, + NV_DMA_ACCESS_RW, + NV_DMA_TARGET_PCI_NONLINEAR, &tt))) { + DRM_ERROR("Error creating PCI TT ctxdma: %d\n", DRM_ERR(ENOMEM)); + return DRM_ERR(ENOMEM); + } + + ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL); + if (ret) { + DRM_ERROR("Error referencing PCI TT ctxdma: %d\n", ret); + return ret; + } } - return 0; } -- cgit v1.2.3
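
The RAMHT these patches manage is an open-addressed hash table: nouveau_ramht_hash_handle() folds the object handle down to the table's index width, XORs in the channel id (on <NV50 only, after the NV50 channel/object patch), and scales by the 8-byte entry size. What follows is a minimal user-space sketch of that hash for illustration only, not driver code; ramht_bits = 9 (a 4KiB table) and the sample handle/channel are assumed values.

#include <stdint.h>
#include <stdio.h>

/* Fold the handle ramht_bits at a time, XOR in the channel id, then
 * multiply by 8 because each RAMHT entry is two 32-bit words
 * (handle, context).  ramht_bits = 9 models a 4KiB hash table. */
static uint32_t ramht_hash(uint32_t handle, int channel, int ramht_bits)
{
	uint32_t hash = 0;

	while (handle) {
		hash ^= handle & ((1 << ramht_bits) - 1);
		handle >>= ramht_bits;
	}
	hash ^= channel << (ramht_bits - 4);
	return hash << 3;	/* byte offset of the entry in RAMHT */
}

int main(void)
{
	/* illustrative handle/channel values */
	printf("0x%08x\n", ramht_hash(0xbeef0201, 1, 9));
	return 0;
}

On a collision, nouveau_ramht_insert() advances the offset by 8 and wraps at ramht_size, giving up only after probing the whole table — that full circuit is what the "RAMHT space exhausted" error above reports.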
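
For linear ctxdmas on <NV50 cards, nouveau_gpuobj_dma_new() packs the four instance words exactly as the layout comment in the last patch describes. Below is a self-contained sketch of that packing: the target values come from the comment itself ("0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP"), while the access encoding and the sample class/offset/size are assumptions made for the example.

#include <stdint.h>
#include <stdio.h>

enum { ACCESS_RO = 0, ACCESS_WO = 1, ACCESS_RW = 2 };	/* assumed encoding */
enum { TARGET_VIDMEM = 0, TARGET_PCI = 2, TARGET_AGP = 3 }; /* from the comment */

static void pack_linear_ctxdma(uint32_t e[4], uint32_t class,
			       uint32_t offset, uint32_t size,
			       uint32_t access, uint32_t target)
{
	uint32_t frame  = offset & ~0x00000fff;	/* page frame, bits 31:12 */
	uint32_t adjust = offset &  0x00000fff;	/* dma adjust, bits 0-11  */
	uint32_t pte_flags = (access != ACCESS_RO) ? (1 << 1) : 0;

	e[0] = (1 << 12) |	/* page table present */
	       (1 << 13) |	/* page entry linear  */
	       (adjust << 20) |
	       (access << 14) |
	       (target << 16) |
	       class;
	e[1] = size - 1;	/* dma limit */
	e[2] = frame | pte_flags;
	e[3] = frame | pte_flags;
}

int main(void)
{
	uint32_t e[4];
	int i;

	/* e.g. a read/write view of the first 16MiB of VRAM;
	 * the class number here is made up for the example */
	pack_linear_ctxdma(e, 0x3d, 0, 16 << 20, ACCESS_RW, TARGET_VIDMEM);
	for (i = 0; i < 4; i++)
		printf("entry[%d] = 0x%08x\n", i, e[i]);
	return 0;
}

Note how adjust occupies the top 12 bits of entry[0] while frame supplies bits 31:12 of entry[2]/entry[3]: the object can describe a byte-granular window over page-granular memory.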
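
The PCIGART patch sizes a non-linear ctxdma as (((size + PAGE_SIZE - 1) / PAGE_SIZE) << 2) + 12 bytes: two header words, one 4-byte page table entry per page, and room for the terminating PTE the rewritten layout comment mentions. Each PTE is the page's bus address with the low 12 bits reused as flags (bit 1 = writable). A user-space sketch follows, with made-up bus addresses standing in for the kernel's vmalloc_to_page()/virt_to_bus() chain.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Instance object size in bytes for a scatter-gather ctxdma covering
 * `size` bytes, mirroring the patch's calculation. */
static uint32_t sg_ctxdma_bytes(uint32_t size)
{
	return (((size + PAGE_SIZE - 1) / PAGE_SIZE) << 2) + 12;
}

int main(void)
{
	uint32_t size = 3 * PAGE_SIZE;	/* arbitrary example mapping */
	uint32_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	uint32_t i;

	printf("instance object: %u bytes for %u pages\n",
	       sg_ctxdma_bytes(size), npages);

	/* PTEs start at the third instance word in the patch;
	 * 0x10000000 is a made-up base bus address. */
	for (i = 0; i < npages; i++) {
		uint32_t bus = (0x10000000u + i * PAGE_SIZE) & PAGE_MASK;
		printf("word[%u] = 0x%08x\n", i + 2, bus | (1 << 1));
	}
	return 0;
}

One detail worth noting about the patch's fill loop: it decrements an unsigned `size` by PAGE_SIZE per iteration and tests `size > 0`, so it only terminates when callers pass a whole number of pages — which the PCI path guarantees by using dev->sg->pages * PAGE_SIZE.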