| author | Ian Romanick <idr@us.ibm.com> | 2007-07-12 15:28:17 -0700 |
|---|---|---|
| committer | Ian Romanick <idr@us.ibm.com> | 2007-07-12 15:28:17 -0700 |
| commit | 5522136b7f01402ae02cbe35180e3d80f850a6b3 | |
| tree | 275da77af4df8b332f17cb503a4b58b608de8620 /shared-core/nouveau_object.c | |
| parent | 76ca1e858fb8e1a65ea49c0c62350d7ca91044a2 | |
| parent | 851c950d988e5a47fa6add71427e5ef8d4dcf231 | |
Merge branch 'master' into xgi-0-0-2
Diffstat (limited to 'shared-core/nouveau_object.c')
-rw-r--r-- shared-core/nouveau_object.c | 1097
1 file changed, 748 insertions, 349 deletions
```diff
diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c
index e36568c6..16b38e95 100644
--- a/shared-core/nouveau_object.c
+++ b/shared-core/nouveau_object.c
@@ -35,79 +35,6 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
-/* TODO
- *   - Check object class, deny unsafe objects (add card-specific versioning?)
- *   - Get rid of DMA object creation, this should be wrapped by MM routines.
- */
-
-/* Translate a RAMIN offset into a value the card understands, will be useful
- * in the future when we can access more instance ram which isn't mapped into
- * the PRAMIN aperture
- */
-uint32_t
-nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem)
-{
-    uint32_t inst = (uint32_t)mem->start >> 4;
-    DRM_DEBUG("****** on-chip instance for 0x%016llx = 0x%08x\n",
-          mem->start, inst);
-    return inst;
-}
-
-static void
-nouveau_object_link(drm_device_t *dev, struct nouveau_object *obj)
-{
-    drm_nouveau_private_t *dev_priv=dev->dev_private;
-    struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel];
-
-    if (!chan->objs) {
-        chan->objs = obj;
-        return;
-    }
-
-    obj->prev = NULL;
-    obj->next = chan->objs;
-
-    chan->objs->prev = obj;
-    chan->objs = obj;
-}
-
-static void
-nouveau_object_unlink(drm_device_t *dev, struct nouveau_object *obj)
-{
-    drm_nouveau_private_t *dev_priv=dev->dev_private;
-    struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel];
-
-    if (obj->prev == NULL) {
-        if (obj->next)
-            obj->next->prev = NULL;
-        chan->objs = obj->next;
-    } else if (obj->next == NULL) {
-        if (obj->prev)
-            obj->prev->next = NULL;
-    } else {
-        obj->prev->next = obj->next;
-        obj->next->prev = obj->prev;
-    }
-}
-
-static struct nouveau_object *
-nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle)
-{
-    drm_nouveau_private_t *dev_priv=dev->dev_private;
-    struct nouveau_fifo *chan = &dev_priv->fifos[channel];
-    struct nouveau_object *obj = chan->objs;
-
-    DRM_DEBUG("Looking for handle 0x%08x\n", handle);
-    while (obj) {
-        if (obj->handle == handle)
-            return obj;
-        obj = obj->next;
-    }
-
-    DRM_DEBUG("...couldn't find handle\n");
-    return NULL;
-}
-
 /* NVidia uses context objects to drive drawing operations.
 
    Context objects can be selected into 8 subchannels in the FIFO,
@@ -139,7 +66,7 @@ nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle)
    is given as:
 */
 static uint32_t
-nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle)
+nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle)
 {
     drm_nouveau_private_t *dev_priv=dev->dev_private;
     uint32_t hash = 0;
@@ -149,120 +76,477 @@ nouveau_ht_handle_hash(drm_device_t *dev, int channel, uint32_t handle)
         hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
         handle >>= dev_priv->ramht_bits;
     }
-    hash ^= channel << (dev_priv->ramht_bits - 4);
-    return hash << 3;
+    if (dev_priv->card_type < NV_50)
+        hash ^= channel << (dev_priv->ramht_bits - 4);
+    hash <<= 3;
+
+    DRM_DEBUG("ch%d handle=0x%08x hash=0x%08x\n", channel, handle, hash);
+    return hash;
+}
+
+static int
+nouveau_ramht_entry_valid(drm_device_t *dev, nouveau_gpuobj_t *ramht,
+                          uint32_t offset)
+{
+    drm_nouveau_private_t *dev_priv=dev->dev_private;
+    uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4);
+
+    if (dev_priv->card_type < NV_40)
+        return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+    return (ctx != 0);
 }
 
 static int
-nouveau_ht_object_insert(drm_device_t* dev, int channel, uint32_t handle,
-                         struct nouveau_object *obj)
+nouveau_ramht_insert(drm_device_t* dev, nouveau_gpuobj_ref_t *ref)
 {
     drm_nouveau_private_t *dev_priv=dev->dev_private;
-    int ht_base = NV_RAMIN + dev_priv->ramht_offset;
-    int ht_end  = ht_base + dev_priv->ramht_size;
-    int o_ofs, ofs;
-
-    obj->handle = handle;
-    o_ofs = ofs = nouveau_ht_handle_hash(dev, channel, obj->handle);
-
-    while (NV_READ(ht_base + ofs) || NV_READ(ht_base + ofs + 4)) {
-        ofs += 8;
-        if (ofs == dev_priv->ramht_size) ofs = 0;
-        if (ofs == o_ofs) {
-            DRM_ERROR("no free hash table entries\n");
-            return 1;
+    struct nouveau_fifo *chan = dev_priv->fifos[ref->channel];
+    nouveau_gpuobj_t *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+    nouveau_gpuobj_t *gpuobj = ref->gpuobj;
+    uint32_t ctx, co, ho;
+
+    if (!ramht) {
+        DRM_ERROR("No hash table!\n");
+        return DRM_ERR(EINVAL);
+    }
+
+    if (dev_priv->card_type < NV_40) {
+        ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
+              (ref->channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+              (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+    } else
+    if (dev_priv->card_type < NV_50) {
+        ctx = (ref->instance >> 4) |
+              (ref->channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+              (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+    } else {
+        ctx = (ref->instance >> 4) |
+              (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+    }
+
+    co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
+    do {
+        if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+            DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+                      ref->channel, co, ref->handle, ctx);
+            INSTANCE_WR(ramht, (co + 0)/4, ref->handle);
+            INSTANCE_WR(ramht, (co + 4)/4, ctx);
+            return 0;
         }
+        DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n",
+                  ref->channel, co, INSTANCE_RD(ramht, co/4));
+
+        co += 8;
+        if (co >= dev_priv->ramht_size)
+            co = 0;
+    } while (co != ho);
+
+    DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel);
+    return DRM_ERR(ENOMEM);
+}
+
+static void
+nouveau_ramht_remove(drm_device_t* dev, nouveau_gpuobj_ref_t *ref)
+{
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    struct nouveau_fifo *chan = dev_priv->fifos[ref->channel];
+    nouveau_gpuobj_t *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+    uint32_t co, ho;
+
+    if (!ramht) {
+        DRM_ERROR("No hash table!\n");
+        return;
     }
-    ofs += ht_base;
 
-    DRM_DEBUG("Channel %d - Handle 0x%08x at 0x%08x\n",
-        channel, obj->handle, ofs);
+    co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
+    do {
+        if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+            (ref->handle == INSTANCE_RD(ramht, (co/4)))) {
+            DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+                      ref->channel, co, ref->handle,
+                      INSTANCE_RD(ramht, (co + 4)));
+            INSTANCE_WR(ramht, (co + 0)/4, 0x00000000);
+            INSTANCE_WR(ramht, (co + 4)/4, 0x00000000);
+            return;
+        }
+
+        co += 8;
+        if (co >= dev_priv->ramht_size)
+            co = 0;
+    } while (co != ho);
 
-    NV_WRITE(NV_RAMHT_HANDLE_OFFSET + ofs, obj->handle);
-    if (dev_priv->card_type >= NV_40)
-        NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs,
-             (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
-             (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT) |
-             nouveau_chip_instance_get(dev, obj->instance)
-             );
+    DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n",
+              ref->channel, ref->handle);
+}
+
+int
+nouveau_gpuobj_new(drm_device_t *dev, int channel, int size, int align,
+                   uint32_t flags, nouveau_gpuobj_t **gpuobj_ret)
+{
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    nouveau_engine_func_t *engine = &dev_priv->Engine;
+    struct nouveau_fifo *chan = NULL;
+    nouveau_gpuobj_t *gpuobj;
+    struct mem_block *pramin = NULL;
+    int ret;
+
+    DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n",
+              channel, size, align, flags);
+
+    if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
+        return DRM_ERR(EINVAL);
+
+    if (channel >= 0) {
+        if (channel > nouveau_fifo_number(dev))
+            return DRM_ERR(EINVAL);
+        chan = dev_priv->fifos[channel];
+    }
+
+    gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
+    if (!gpuobj)
+        return DRM_ERR(ENOMEM);
+    DRM_DEBUG("gpuobj %p\n", gpuobj);
+    gpuobj->flags = flags;
+    gpuobj->im_channel = channel;
+
+    /* Choose between global instmem heap, and per-channel private
+     * instmem heap.  On <NV50 allow requests for private instmem
+     * to be satisfied from global heap if no per-channel area
+     * available.
+     */
+    if (chan) {
+        if (chan->ramin_heap) {
+            DRM_DEBUG("private heap\n");
+            pramin = chan->ramin_heap;
+        } else
+        if (dev_priv->card_type < NV_50) {
+            DRM_DEBUG("global heap fallback\n");
+            pramin = dev_priv->ramin_heap;
+        }
+    } else {
+        DRM_DEBUG("global heap\n");
+        pramin = dev_priv->ramin_heap;
+    }
+
+    if (!pramin) {
+        DRM_ERROR("No PRAMIN heap!\n");
+        return DRM_ERR(EINVAL);
+    }
+
+    if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) {
+        nouveau_gpuobj_del(dev, &gpuobj);
+        return ret;
+    }
+
+    /* Allocate a chunk of the PRAMIN aperture */
+    gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
+                                                drm_order(align),
+                                                (DRMFILE)-2);
+    if (!gpuobj->im_pramin) {
+        nouveau_gpuobj_del(dev, &gpuobj);
+        return DRM_ERR(ENOMEM);
+    }
+    gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE;
+
+    if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) {
+        nouveau_gpuobj_del(dev, &gpuobj);
+        return ret;
+    }
+
+    if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+        int i;
+
+        for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+            INSTANCE_WR(gpuobj, i/4, 0);
+    }
+
+    if (dev_priv->gpuobj_all) {
+        gpuobj->next = dev_priv->gpuobj_all;
+        gpuobj->next->prev = gpuobj;
+    }
+    dev_priv->gpuobj_all = gpuobj;
+
+    *gpuobj_ret = gpuobj;
+    return 0;
+}
+
+void nouveau_gpuobj_takedown(drm_device_t *dev)
+{
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    nouveau_gpuobj_t *gpuobj = NULL;
+
+    DRM_DEBUG("\n");
+
+    while ((gpuobj = dev_priv->gpuobj_all)) {
+        DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n",
+                  gpuobj, gpuobj->refcount);
+        gpuobj->refcount = 0;
+        nouveau_gpuobj_del(dev, &gpuobj);
+    }
+}
+
+int nouveau_gpuobj_del(drm_device_t *dev, nouveau_gpuobj_t **pgpuobj)
+{
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    nouveau_engine_func_t *engine = &dev_priv->Engine;
+    nouveau_gpuobj_t *gpuobj;
+
+    DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
+
+    if (!dev_priv || !pgpuobj || !(*pgpuobj))
+        return DRM_ERR(EINVAL);
+    gpuobj = *pgpuobj;
+
+    if (gpuobj->refcount != 0) {
+        DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount);
+        return DRM_ERR(EINVAL);
+    }
+
+    engine->instmem.clear(dev, gpuobj);
+
+    if (gpuobj->im_pramin) {
+        if (gpuobj->flags & NVOBJ_FLAG_FAKE)
+            drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin),
+                     DRM_MEM_DRIVER);
+        else
+            nouveau_mem_free_block(gpuobj->im_pramin);
+    }
+
+    if (gpuobj->next)
+        gpuobj->next->prev = gpuobj->prev;
+    if (gpuobj->prev)
+        gpuobj->prev->next = gpuobj->next;
     else
-        NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs,
-             NV_RAMHT_CONTEXT_VALID |
-             (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
-             (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT) |
-             nouveau_chip_instance_get(dev, obj->instance)
-             );
-
-    obj->ht_loc = ofs;
+        dev_priv->gpuobj_all = gpuobj->next;
+
+    *pgpuobj = NULL;
+    drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER);
     return 0;
 }
 
-static void nouveau_hash_table_remove(drm_device_t* dev,
-                                      struct nouveau_object *obj)
+static int
+nouveau_gpuobj_instance_get(drm_device_t *dev, int channel,
+                            nouveau_gpuobj_t *gpuobj, uint32_t *inst)
 {
     drm_nouveau_private_t *dev_priv = dev->dev_private;
+    nouveau_gpuobj_t *cpramin;
+
+    /* <NV50 use PRAMIN address everywhere */
+    if (dev_priv->card_type < NV_50) {
+        *inst = gpuobj->im_pramin->start;
+        return 0;
+    }
 
-    DRM_DEBUG("Remove handle 0x%08x at 0x%08x from HT\n",
-          obj->handle, obj->ht_loc);
-    if (obj->ht_loc) {
-        DRM_DEBUG("... HT entry was: 0x%08x/0x%08x\n",
-              NV_READ(obj->ht_loc), NV_READ(obj->ht_loc+4));
-        NV_WRITE(obj->ht_loc , 0x00000000);
-        NV_WRITE(obj->ht_loc+4, 0x00000000);
+    if ((channel > 0) && gpuobj->im_channel != channel) {
+        DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
+                  gpuobj->im_channel, channel);
+        return DRM_ERR(EINVAL);
+    }
+
+    /* NV50 channel-local instance */
+    if (channel > 0) {
+        cpramin = dev_priv->fifos[channel]->ramin->gpuobj;
+        *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
+        return 0;
     }
+
+    /* NV50 global (VRAM) instance */
+    if (gpuobj->im_channel < 0) {
+        /* ...from global heap */
+        if (!gpuobj->im_backing) {
+            DRM_ERROR("AII, no VRAM backing gpuobj\n");
+            return DRM_ERR(EINVAL);
+        }
+        *inst = gpuobj->im_backing->start;
+        return 0;
+    } else {
+        /* ...from local heap */
+        cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj;
+        *inst = cpramin->im_backing->start +
+                (gpuobj->im_pramin->start - cpramin->im_pramin->start);
+        return 0;
+    }
+
+    return DRM_ERR(EINVAL);
 }
 
-static struct nouveau_object *
-nouveau_object_instance_alloc(drm_device_t* dev, int channel)
+int
+nouveau_gpuobj_ref_add(drm_device_t *dev, int channel, uint32_t handle,
+                       nouveau_gpuobj_t *gpuobj, nouveau_gpuobj_ref_t **ref_ret)
 {
-    drm_nouveau_private_t *dev_priv=dev->dev_private;
-    struct nouveau_object *obj;
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    struct nouveau_fifo *chan = NULL;
+    nouveau_gpuobj_ref_t *ref;
+    uint32_t instance;
+    int ret;
+
+    DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n", channel, handle, gpuobj);
+
+    if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
+        return DRM_ERR(EINVAL);
+
+    if (channel >= 0) {
+        if (channel > nouveau_fifo_number(dev))
+            return DRM_ERR(EINVAL);
+        chan = dev_priv->fifos[channel];
+    } else
+    if (!ref_ret)
+        return DRM_ERR(EINVAL);
+
+    ret = nouveau_gpuobj_instance_get(dev, channel, gpuobj, &instance);
+    if (ret)
+        return ret;
 
-    /* Create object struct */
-    obj = drm_calloc(1, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
-    if (!obj) {
-        DRM_ERROR("couldn't alloc memory for object\n");
-        return NULL;
+    ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER);
+    if (!ref)
+        return DRM_ERR(ENOMEM);
+    ref->gpuobj   = gpuobj;
+    ref->channel  = channel;
+    ref->instance = instance;
+
+    if (!ref_ret) {
+        ref->handle = handle;
+
+        ret = nouveau_ramht_insert(dev, ref);
+        if (ret) {
+            drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
+            return ret;
+        }
+
+        ref->next = chan->ramht_refs;
+        chan->ramht_refs = ref;
+    } else {
+        ref->handle = ~0;
+        *ref_ret = ref;
     }
 
-    /* Allocate instance memory */
-    obj->instance = nouveau_instmem_alloc(dev,
-            (dev_priv->card_type >= NV_40 ? 32 : 16), 4);
-    if (!obj->instance) {
-        DRM_ERROR("couldn't alloc RAMIN for object\n");
-        drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
-        return NULL;
+    ref->gpuobj->refcount++;
+    return 0;
+}
+
+int nouveau_gpuobj_ref_del(drm_device_t *dev, nouveau_gpuobj_ref_t **pref)
+{
+    nouveau_gpuobj_ref_t *ref;
+
+    DRM_DEBUG("ref %p\n", pref ? *pref : NULL);
+
+    if (!dev || !pref || *pref == NULL)
+        return DRM_ERR(EINVAL);
+    ref = *pref;
+
+    if (ref->handle != ~0)
+        nouveau_ramht_remove(dev, ref);
+
+    if (ref->gpuobj) {
+        ref->gpuobj->refcount--;
+
+        if (ref->gpuobj->refcount == 0) {
+            if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
+                nouveau_gpuobj_del(dev, &ref->gpuobj);
+        }
     }
 
-    /* Bind object to channel */
-    obj->channel = channel;
-    obj->handle  = ~0;
-    nouveau_object_link(dev, obj);
+    *pref = NULL;
+    drm_free(ref, sizeof(ref), DRM_MEM_DRIVER);
+    return 0;
+}
+
+int
+nouveau_gpuobj_new_ref(drm_device_t *dev, int oc, int rc, uint32_t handle,
+                       int size, int align, uint32_t flags,
+                       nouveau_gpuobj_ref_t **ref)
+{
+    nouveau_gpuobj_t *gpuobj = NULL;
+    int ret;
 
-    return obj;
+    if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj)))
+        return ret;
+
+    if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) {
+        nouveau_gpuobj_del(dev, &gpuobj);
+        return ret;
+    }
+
+    return 0;
 }
 
-static void
-nouveau_object_instance_free(drm_device_t *dev, struct nouveau_object *obj)
+static int
+nouveau_gpuobj_ref_find(drm_device_t *dev, int channel, uint32_t handle,
+                        nouveau_gpuobj_ref_t **ref_ret)
 {
-    drm_nouveau_private_t *dev_priv=dev->dev_private;
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    struct nouveau_fifo *chan = dev_priv->fifos[channel];
+    nouveau_gpuobj_ref_t *ref = chan->ramht_refs;
+
+    while (ref) {
+        if (ref->handle == handle) {
+            if (ref_ret)
+                *ref_ret = ref;
+            return 0;
+        }
+        ref = ref->next;
+    }
+
+    return DRM_ERR(EINVAL);
+}
+
+int
+nouveau_gpuobj_new_fake(drm_device_t *dev, uint32_t offset, uint32_t size,
+                        uint32_t flags, nouveau_gpuobj_t **pgpuobj,
+                        nouveau_gpuobj_ref_t **pref)
+{
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    nouveau_gpuobj_t *gpuobj = NULL;
     int i;
 
-    /* Unbind object from channel */
-    nouveau_object_unlink(dev, obj);
+    DRM_DEBUG("offset=0x%08x size=0x%08x flags=0x%08x\n",
+              offset, size, flags);
+
+    gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
+    if (!gpuobj)
+        return DRM_ERR(ENOMEM);
+    DRM_DEBUG("gpuobj %p\n", gpuobj);
+    gpuobj->im_channel = -1;
+    gpuobj->flags      = flags | NVOBJ_FLAG_FAKE;
+
+    gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block),
+                                   DRM_MEM_DRIVER);
+    if (!gpuobj->im_pramin) {
+        nouveau_gpuobj_del(dev, &gpuobj);
+        return DRM_ERR(ENOMEM);
+    }
+    gpuobj->im_pramin->start = offset;
+    gpuobj->im_pramin->size  = size;
 
-    /* Clean RAMIN entry */
-    DRM_DEBUG("Instance entry for 0x%08x"
-        "(engine %d, class 0x%x) before destroy:\n",
-        obj->handle, obj->engine, obj->class);
-    for (i=0; i<(obj->instance->size/4); i++) {
-        DRM_DEBUG("  +0x%02x: 0x%08x\n", (i*4),
-            INSTANCE_RD(obj->instance, i));
-        INSTANCE_WR(obj->instance, i, 0x00000000);
+    if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+        for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+            INSTANCE_WR(gpuobj, i/4, 0);
     }
 
-    /* Free RAMIN */
-    nouveau_instmem_free(dev, obj->instance);
+    if (pref) {
+        if ((i = nouveau_gpuobj_ref_add(dev, -1, 0, gpuobj, pref))) {
+            nouveau_gpuobj_del(dev, &gpuobj);
+            return i;
+        }
+    }
+
+    if (pgpuobj)
+        *pgpuobj = gpuobj;
+    return 0;
+}
+
+
+static int
+nouveau_gpuobj_class_instmem_size(drm_device_t *dev, int class)
+{
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+    /*XXX: dodgy hack for now */
+    if (dev_priv->card_type >= NV_50)
+        return 24;
+    if (dev_priv->card_type >= NV_40)
+        return 32;
+    return 16;
 }
 
 /*
@@ -278,76 +562,147 @@ nouveau_object_instance_free(drm_device_t *dev, struct nouveau_object *obj)
       17:16  target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
       31:20  dma adjust (bits 0-11 of the address)
    entry[1]
-      dma limit
+      dma limit (size of transfer)
+   entry[X]
       1      0 readonly, 1 readwrite
-      31:12  dma frame address (bits 12-31 of the address)
+      31:12  dma frame address of the page (bits 12-31 of the address)
+   entry[N]
+      page table terminator, same value as the first pte, as does nvidia
+      rivatv uses 0xffffffff
 
-   Non linear page tables seem to need a list of frame addresses afterwards,
-   the rivatv project has some info on this.
+   Non linear page tables need a list of frame addresses afterwards,
+   the rivatv project has some info on this.
 
    The method below creates a DMA object in instance RAM and returns a handle
    to it that can be used to set up context objects.
 */
-
-struct nouveau_object *
-nouveau_object_dma_create(drm_device_t* dev, int channel, int class,
-                          uint32_t offset, uint32_t size,
-                          int access, int target)
+int
+nouveau_gpuobj_dma_new(drm_device_t *dev, int channel, int class,
+                       uint64_t offset, uint64_t size, int access, int target,
+                       nouveau_gpuobj_t **gpuobj)
 {
-    drm_nouveau_private_t *dev_priv=dev->dev_private;
-    struct nouveau_object *obj;
-    uint32_t frame, adjust;
-    uint32_t pte_flags = 0;
-
-    DRM_DEBUG("offset:0x%08x, size:0x%08x, target:%d, access:%d\n",
-        offset, size, target, access);
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    int ret;
+    uint32_t is_scatter_gather = 0;
+
+    DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
+              channel, class, offset, size);
+    DRM_DEBUG("access=%d target=%d\n", access, target);
 
     switch (target) {
-    case NV_DMA_TARGET_AGP:
-        offset += dev_priv->agp_phys;
-        break;
-    default:
-        break;
-    }
-
-    switch (access) {
-    case NV_DMA_ACCESS_RO:
-        break;
-    case NV_DMA_ACCESS_WO:
-    case NV_DMA_ACCESS_RW:
-        pte_flags |= (1 << 1);
-        break;
-    default:
-        DRM_ERROR("invalid access mode=%d\n", access);
-        return NULL;
+        case NV_DMA_TARGET_AGP:
+             offset += dev_priv->agp_phys;
+             break;
+        case NV_DMA_TARGET_PCI_NONLINEAR:
+             /*assume the "offset" is a virtual memory address*/
+             is_scatter_gather = 1;
+             /*put back the right value*/
+             target = NV_DMA_TARGET_PCI;
+             break;
+        default:
+             break;
+    }
+
+    ret = nouveau_gpuobj_new(dev, channel,
+                 is_scatter_gather ? ((((size + PAGE_SIZE - 1) / PAGE_SIZE) << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class),
+                 16,
+                 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+                 gpuobj);
+    if (ret) {
+        DRM_ERROR("Error creating gpuobj: %d\n", ret);
+        return ret;
     }
 
-    frame  = offset & ~0x00000FFF;
-    adjust = offset &  0x00000FFF;
-
-    obj = nouveau_object_instance_alloc(dev, channel);
-    if (!obj) {
-        DRM_ERROR("couldn't allocate DMA object\n");
-        return obj;
+    if (dev_priv->card_type < NV_50) {
+        uint32_t frame, adjust, pte_flags = 0;
+        adjust = offset &  0x00000fff;
+        if (access != NV_DMA_ACCESS_RO)
+            pte_flags |= (1<<1);
+
+        if ( ! is_scatter_gather )
+        {
+            frame  = offset & ~0x00000fff;
+
+            INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) |
+                        (adjust << 20) |
+                        (access << 14) |
+                        (target << 16) |
+                         class));
+            INSTANCE_WR(*gpuobj, 1, size - 1);
+            INSTANCE_WR(*gpuobj, 2, frame | pte_flags);
+            INSTANCE_WR(*gpuobj, 3, frame | pte_flags);
+        }
+        else
+        {
+            uint32_t instance_offset;
+            uint64_t bus_addr;
+            size = (uint32_t) size;
+
+            DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size);
+            INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) |
+                        (adjust << 20) |
+                        (access << 14) |
+                        (target << 16) |
+                         class));
+            INSTANCE_WR(*gpuobj, 1, size-1);
+
+            offset += dev->sg->virtual;
+
+            /*write starting at the third dword*/
+            instance_offset = 2;
+
+            /*for each PAGE, get its bus address, fill in the page table entry, and advance*/
+            while ( size > 0 ) {
+                bus_addr = vmalloc_to_page(offset);
+                if ( ! bus_addr )
+                {
+                    DRM_ERROR("Couldn't map virtual address %#llx to a page number\n", offset);
+                    nouveau_gpuobj_del(dev, gpuobj);
+                    return DRM_ERR(ENOMEM);
+                }
+                bus_addr = (uint64_t) page_address(bus_addr);
+                if ( ! bus_addr )
+                {
+                    DRM_ERROR("Couldn't find page address for address %#llx\n", offset);
+                    nouveau_gpuobj_del(dev, gpuobj);
+                    return DRM_ERR(ENOMEM);
+                }
+                bus_addr |= (offset & ~PAGE_MASK);
+                bus_addr = virt_to_bus((void *)bus_addr);
+                if ( ! bus_addr )
+                {
+                    DRM_ERROR("Couldn't get bus address for %#llx\n", offset);
+                    nouveau_gpuobj_del(dev, gpuobj);
+                    return DRM_ERR(ENOMEM);
+                }
+
+                /*if ( bus_addr >= 1 << 32 )
+                {
+                    DRM_ERROR("Bus address %#llx is over 32 bits, Nvidia cards cannot address it !\n", bus_addr);
+                    nouveau_gpuobj_del(dev, gpuobj);
+                    return DRM_ERR(EINVAL);
+                }*/
+
+                frame = (uint32_t) bus_addr & ~0x00000FFF;
+                INSTANCE_WR(*gpuobj, instance_offset, frame | pte_flags);
+                offset += PAGE_SIZE;
+                instance_offset ++;
+                size -= PAGE_SIZE;
+            }
+
+        }
+    } else {
+        INSTANCE_WR(*gpuobj, 0, 0x00190000 | class);
+        INSTANCE_WR(*gpuobj, 1, offset + size - 1);
+        INSTANCE_WR(*gpuobj, 2, offset);
+        INSTANCE_WR(*gpuobj, 5, 0x00010000);
     }
 
-    obj->engine = 0;
-    obj->class  = class;
-
-    INSTANCE_WR(obj->instance, 0, ((1<<12) | (1<<13) |
-                (adjust << 20) |
-                (access << 14) |
-                (target << 16) |
-                 class));
-    INSTANCE_WR(obj->instance, 1, size-1);
-    INSTANCE_WR(obj->instance, 2, frame | pte_flags);
-    INSTANCE_WR(obj->instance, 3, frame | pte_flags);
-
-    return obj;
+    (*gpuobj)->engine = NVOBJ_ENGINE_SW;
+    (*gpuobj)->class  = class;
+    return 0;
 }
-
 
 /* Context objects in the instance RAM have the following structure.
  * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
@@ -399,177 +754,221 @@ nouveau_object_dma_create(drm_device_t* dev, int channel, int class,
    entry[5]: set to 0?
 */
-struct nouveau_object *
-nouveau_object_gr_create(drm_device_t* dev, int channel, int class)
+int
+nouveau_gpuobj_gr_new(drm_device_t *dev, int channel, int class,
+                      nouveau_gpuobj_t **gpuobj)
 {
-    drm_nouveau_private_t *dev_priv=dev->dev_private;
-    struct nouveau_object *obj;
-
-    DRM_DEBUG("class=%x\n", class);
-
-    obj = nouveau_object_instance_alloc(dev, channel);
-    if (!obj) {
-        DRM_ERROR("couldn't allocate context object\n");
-        return obj;
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    int ret;
+
+    DRM_DEBUG("ch%d class=0x%04x\n", channel, class);
+
+    ret = nouveau_gpuobj_new(dev, channel,
+                 nouveau_gpuobj_class_instmem_size(dev, class),
+                 16,
+                 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+                 gpuobj);
+    if (ret) {
+        DRM_ERROR("Error creating gpuobj: %d\n", ret);
+        return ret;
     }
 
-    obj->engine = 1;
-    obj->class  = class;
-
+    if (dev_priv->card_type >= NV_50) {
+        INSTANCE_WR(*gpuobj, 0, class);
+        INSTANCE_WR(*gpuobj, 5, 0x00010000);
+    } else {
     switch (class) {
     case NV_CLASS_NULL:
-        INSTANCE_WR(obj->instance, 0, 0x00001030);
-        INSTANCE_WR(obj->instance, 1, 0xFFFFFFFF);
-        INSTANCE_WR(obj->instance, 2, 0x00000000);
-        INSTANCE_WR(obj->instance, 2, 0x00000000);
+        INSTANCE_WR(*gpuobj, 0, 0x00001030);
+        INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF);
         break;
     default:
        if (dev_priv->card_type >= NV_40) {
-            INSTANCE_WR(obj->instance, 0, obj->class);
-            INSTANCE_WR(obj->instance, 1, 0x00000000);
+            INSTANCE_WR(*gpuobj, 0, class);
 #ifdef __BIG_ENDIAN
-            INSTANCE_WR(obj->instance, 2, 0x01000000);
-#else
-            INSTANCE_WR(obj->instance, 2, 0x00000000);
+            INSTANCE_WR(*gpuobj, 2, 0x01000000);
 #endif
-            INSTANCE_WR(obj->instance, 3, 0x00000000);
-            INSTANCE_WR(obj->instance, 4, 0x00000000);
-            INSTANCE_WR(obj->instance, 5, 0x00000000);
-            INSTANCE_WR(obj->instance, 6, 0x00000000);
-            INSTANCE_WR(obj->instance, 7, 0x00000000);
         } else {
 #ifdef __BIG_ENDIAN
-            INSTANCE_WR(obj->instance, 0, obj->class | 0x00080000);
+            INSTANCE_WR(*gpuobj, 0, class | 0x00080000);
 #else
-            INSTANCE_WR(obj->instance, 0, obj->class);
+            INSTANCE_WR(*gpuobj, 0, class);
 #endif
-            INSTANCE_WR(obj->instance, 1, 0x00000000);
-            INSTANCE_WR(obj->instance, 2, 0x00000000);
-            INSTANCE_WR(obj->instance, 3, 0x00000000);
         }
     }
+    }
 
-    return obj;
-}
-
-void
-nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj)
-{
-    nouveau_object_instance_free(dev, obj);
-    if (obj->handle != ~0)
-        nouveau_hash_table_remove(dev, obj);
-    drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
+    (*gpuobj)->engine = NVOBJ_ENGINE_GR;
+    (*gpuobj)->class  = class;
+    return 0;
 }
 
-void nouveau_object_cleanup(drm_device_t *dev, int channel)
+static int
+nouveau_gpuobj_channel_init_pramin(drm_device_t *dev, int channel)
 {
-    drm_nouveau_private_t *dev_priv=dev->dev_private;
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    struct nouveau_fifo *chan = dev_priv->fifos[channel];
+    nouveau_gpuobj_t *pramin = NULL;
+    int size, base, ret;
+
+    DRM_DEBUG("ch%d\n", channel);
+
+    /* Base amount for object storage (4KiB enough?) */
+    size = 0x1000;
+    base = 0;
+
+    /* PGRAPH context */
+
+    if (dev_priv->card_type == NV_50) {
+        /* Various fixed table thingos */
+        size += 0x1400; /* mostly unknown stuff */
+        size += 0x4000; /* vm pd */
+        base  = 0x6000;
+        /* RAMHT, not sure about setting size yet, 32KiB to be safe */
+        size += 0x8000;
+        /* RAMFC */
+        size += 0x1000;
+        /* PGRAPH context */
+        size += 0x60000;
+    }
 
-    while (dev_priv->fifos[channel].objs) {
-        nouveau_object_free(dev, dev_priv->fifos[channel].objs);
+    DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
+              channel, size, base);
+    ret = nouveau_gpuobj_new_ref(dev, -1, -1, 0, size, 0x1000, 0,
+                                 &chan->ramin);
+    if (ret) {
+        DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
+        return ret;
+    }
+    pramin = chan->ramin->gpuobj;
+
+    ret = nouveau_mem_init_heap(&chan->ramin_heap,
+                                pramin->im_pramin->start + base, size);
+    if (ret) {
+        DRM_ERROR("Error creating PRAMIN heap: %d\n", ret);
+        nouveau_gpuobj_ref_del(dev, &chan->ramin);
+        return ret;
     }
+
+    return 0;
 }
 
-int nouveau_ioctl_object_init(DRM_IOCTL_ARGS)
+int
+nouveau_gpuobj_channel_init(drm_device_t *dev, int channel,
+                            uint32_t vram_h, uint32_t tt_h)
 {
-    DRM_DEVICE;
-    drm_nouveau_object_init_t init;
-    struct nouveau_object *obj;
-
-    DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_object_init_t __user *)
-        data, sizeof(init));
-
-    if (!nouveau_fifo_owner(dev, filp, init.channel)) {
-        DRM_ERROR("pid %d doesn't own channel %d\n",
-              DRM_CURRENTPID, init.channel);
-        return DRM_ERR(EINVAL);
+    drm_nouveau_private_t *dev_priv = dev->dev_private;
+    struct nouveau_fifo *chan = dev_priv->fifos[channel];
+    nouveau_gpuobj_t *vram = NULL, *tt = NULL;
+    int ret;
+
+    DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", channel, vram_h, tt_h);
+
+    /* Reserve a block of PRAMIN for the channel
+     *XXX: maybe on <NV50 too at some point
+     */
+    if (0 || dev_priv->card_type == NV_50) {
+        ret = nouveau_gpuobj_channel_init_pramin(dev, channel);
+        if (ret)
+            return ret;
     }
 
-    //FIXME: check args, only allow trusted objects to be created
-
-    if (nouveau_object_handle_find(dev, init.channel, init.handle)) {
-        DRM_ERROR("Channel %d: handle 0x%08x already exists\n",
-            init.channel, init.handle);
-        return DRM_ERR(EINVAL);
+    /* RAMHT */
+    if (dev_priv->card_type < NV_50) {
+        ret = nouveau_gpuobj_ref_add(dev, -1, 0, dev_priv->ramht,
+                                     &chan->ramht);
+        if (ret)
+            return ret;
+    } else {
+        ret = nouveau_gpuobj_new_ref(dev, channel, channel, 0,
+                                     0x8000, 16,
+                                     NVOBJ_FLAG_ZERO_ALLOC,
+                                     &chan->ramht);
+        if (ret)
+            return ret;
    }
 
-    obj = nouveau_object_gr_create(dev, init.channel, init.class);
-    if (!obj)
-        return DRM_ERR(ENOMEM);
+    /* VRAM ctxdma */
+    if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
+                                      0, dev_priv->fb_available_size,
+                                      NV_DMA_ACCESS_RW,
+                                      NV_DMA_TARGET_VIDMEM, &vram))) {
+        DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
+        return ret;
+    }
 
-    if (nouveau_ht_object_insert(dev, init.channel, init.handle, obj)) {
-        nouveau_object_free(dev, obj);
-        return DRM_ERR(ENOMEM);
+    if ((ret = nouveau_gpuobj_ref_add(dev, channel, vram_h, vram, NULL))) {
+        DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret);
+        return ret;
     }
 
+    if (dev_priv->agp_heap) {
+        /* AGPGART ctxdma */
+        if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
+                                          0, dev_priv->agp_available_size,
+                                          NV_DMA_ACCESS_RW,
+                                          NV_DMA_TARGET_AGP, &tt))) {
+            DRM_ERROR("Error creating AGP TT ctxdma: %d\n", DRM_ERR(ENOMEM));
+            return DRM_ERR(ENOMEM);
+        }
+
+        ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL);
+        if (ret) {
+            DRM_ERROR("Error referencing AGP TT ctxdma: %d\n", ret);
+            return ret;
+        }
+    }
+    else {
+        if (dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/
+
+        /*PCI*/
+        if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
+                                         0, dev->sg->pages * PAGE_SIZE,
+                                         NV_DMA_ACCESS_RW,
+                                         NV_DMA_TARGET_PCI_NONLINEAR, &tt))) {
+            DRM_ERROR("Error creating PCI TT ctxdma: %d\n", DRM_ERR(ENOMEM));
            return 0; //this is noncritical
+        }
+
+        ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL);
+        if (ret) {
+            DRM_ERROR("Error referencing PCI TT ctxdma: %d\n", ret);
+            return ret;
+        }
+    }
+
     return 0;
 }
 
-static int
-nouveau_dma_object_check_access(drm_device_t *dev,
-                                drm_nouveau_dma_object_init_t *init)
+void
+nouveau_gpuobj_channel_takedown(drm_device_t *dev, int channel)
 {
     drm_nouveau_private_t *dev_priv = dev->dev_private;
-    uint64_t limit;
-
-    /* Check for known DMA object classes */
-    switch (init->class) {
-    case NV_CLASS_DMA_IN_MEMORY:
-    case NV_CLASS_DMA_FROM_MEMORY:
-    case NV_CLASS_DMA_TO_MEMORY:
-        break;
-    default:
-        DRM_ERROR("invalid class = 0x%x\n", init->class);
-        return DRM_ERR(EPERM);
-    }
+    struct nouveau_fifo *chan = dev_priv->fifos[channel];
+    nouveau_gpuobj_ref_t *ref;
 
-    /* Check access mode, and translate to NV_DMA_ACCESS_* */
-    switch (init->access) {
-    case NOUVEAU_MEM_ACCESS_RO:
-        init->access = NV_DMA_ACCESS_RO;
-        break;
-    case NOUVEAU_MEM_ACCESS_WO:
-        init->access = NV_DMA_ACCESS_WO;
-        break;
-    case NOUVEAU_MEM_ACCESS_RW:
-        init->access = NV_DMA_ACCESS_RW;
-        break;
-    default:
-        DRM_ERROR("invalid access mode = %d\n", init->access);
-        return DRM_ERR(EPERM);
-    }
+    DRM_DEBUG("ch%d\n", channel);
 
-    /* Check that request is within the allowed limits of "target" */
-    switch (init->target) {
-    case NOUVEAU_MEM_FB:
-        limit = dev_priv->fb_available_size;
-        init->target = NV_DMA_TARGET_VIDMEM;
-        break;
-    case NOUVEAU_MEM_AGP:
-        limit = dev_priv->agp_available_size;
-        init->target = NV_DMA_TARGET_AGP;
-        break;
-    default:
-        DRM_ERROR("invalid target = 0x%x\n", init->target);
-        return DRM_ERR(EPERM);
+    while ((ref = chan->ramht_refs)) {
+        chan->ramht_refs = ref->next;
+        nouveau_gpuobj_ref_del(dev, &ref);
     }
+    nouveau_gpuobj_ref_del(dev, &chan->ramht);
 
-    if ((init->offset > limit) || (init->offset + init->size) > limit) {
-        DRM_ERROR("access out of allowed range (%d,0x%08x,0x%08x)\n",
-            init->target, init->offset, init->size);
-        return DRM_ERR(EPERM);
-    }
+    if (chan->ramin_heap)
+        nouveau_mem_takedown(&chan->ramin_heap);
+    if (chan->ramin)
+        nouveau_gpuobj_ref_del(dev, &chan->ramin);
 
-    return 0;
 }
 
-int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS)
+int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS)
 {
     DRM_DEVICE;
-    drm_nouveau_dma_object_init_t init;
-    struct nouveau_object *obj;
+    drm_nouveau_grobj_alloc_t init;
+    nouveau_gpuobj_t *gr = NULL;
+    int ret;
 
-    DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_dma_object_init_t __user *)
+    DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_grobj_alloc_t __user *)
                  data, sizeof(init));
 
     if (!nouveau_fifo_owner(dev, filp, init.channel)) {
@@ -578,25 +977,25 @@ int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS)
         return DRM_ERR(EINVAL);
     }
 
-    if (nouveau_dma_object_check_access(dev, &init))
-        return DRM_ERR(EPERM);
-
-    if (nouveau_object_handle_find(dev, init.channel, init.handle)) {
-        DRM_ERROR("Channel %d: handle 0x%08x already exists\n",
-            init.channel, init.handle);
+    //FIXME: check args, only allow trusted objects to be created
+
+    if (init.handle == ~0)
         return DRM_ERR(EINVAL);
-    }
+    if (nouveau_gpuobj_ref_find(dev, init.channel, init.handle, NULL) == 0)
+        return DRM_ERR(EEXIST);
 
-    obj = nouveau_object_dma_create(dev, init.channel, init.class,
-                    init.offset, init.size,
-                    init.access, init.target);
-    if (!obj)
-        return DRM_ERR(ENOMEM);
+    if ((ret = nouveau_gpuobj_gr_new(dev, init.channel, init.class, &gr))) {
+        DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n",
+                  ret, init.channel, init.handle);
+        return ret;
+    }
 
-    obj->handle = init.handle;
-    if (nouveau_ht_object_insert(dev, init.channel, init.handle, obj)) {
-        nouveau_object_free(dev, obj);
-        return DRM_ERR(ENOMEM);
+    if ((ret = nouveau_gpuobj_ref_add(dev, init.channel, init.handle,
+                                      gr, NULL))) {
+        DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)",
+                  ret, init.channel, init.handle);
+        nouveau_gpuobj_del(dev, &gr);
+        return ret;
    }
 
     return 0;
```
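
A few stand-alone sketches of the mechanisms this patch touches follow. They are illustrative user-space C, not driver code; any name or constant not visible in the diff above is a stand-in.

`nouveau_ramht_hash_handle()` folds the 32-bit object handle down to `ramht_bits` wide by repeated XOR, mixes the channel id into the upper bits on pre-NV50 chips, and scales by the 8-byte RAMHT entry size. A minimal sketch, assuming a fixed 9-bit table (`RAMHT_BITS` stands in for `dev_priv->ramht_bits`):

```c
#include <stdint.h>
#include <stdio.h>

#define RAMHT_BITS 9    /* stand-in for dev_priv->ramht_bits */

/* Fold the handle, XOR in the channel id (pre-NV50 behaviour), then
 * multiply by the 8-byte entry size to get a byte offset. */
static uint32_t ramht_hash(int channel, uint32_t handle)
{
    uint32_t hash = 0;

    while (handle) {
        hash ^= (handle & ((1 << RAMHT_BITS) - 1));
        handle >>= RAMHT_BITS;
    }
    hash ^= (uint32_t)channel << (RAMHT_BITS - 4);
    return hash << 3;
}

int main(void)
{
    printf("0x%08x\n", ramht_hash(1, 0xbeef0201));
    return 0;
}
```

The channel id perturbs only the upper bits of the folded handle, so the same handle used on two channels starts probing at different table offsets.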
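Both `nouveau_ramht_insert()` and `nouveau_ramht_remove()` walk the table with the same open-addressing pattern: start at the hashed offset, advance one 8-byte entry at a time, wrap at the end, and give up once the walk returns to the starting offset. A toy table showing just that probe logic (slot-indexed rather than byte-offset, with a zero `ctx` standing in for "invalid entry", as on NV40+):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SLOTS 8

struct entry { uint32_t handle, ctx; };

/* Linear probing with wraparound, mirroring the do/while loop in
 * nouveau_ramht_insert(): claim the first invalid slot, fail only
 * after visiting every slot once. */
static int toy_insert(struct entry *t, uint32_t start,
                      uint32_t handle, uint32_t ctx)
{
    uint32_t co = start;

    do {
        if (!t[co].ctx) {           /* empty slot: claim it */
            t[co].handle = handle;
            t[co].ctx = ctx;
            return 0;
        }
        co = (co + 1) % SLOTS;      /* collision: try the next slot */
    } while (co != start);

    return -1;                      /* table exhausted */
}

int main(void)
{
    struct entry table[SLOTS];
    memset(table, 0, sizeof(table));
    printf("%d\n", toy_insert(table, 5, 0xbeef0201, 0x80010118));
    return 0;
}
```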
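The second dword of each RAMHT entry packs the object's instance address together with routing information. A hedged sketch of the pre-NV40 packing done in `nouveau_ramht_insert()`; the valid bit and shift values are meant to mirror the `NV_RAMHT_CONTEXT_*` definitions in `nouveau_drv.h` of this era, so verify them against your tree before relying on them:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed values of NV_RAMHT_CONTEXT_* from nouveau_drv.h. */
#define RAMHT_VALID          (1u << 31)
#define RAMHT_CHANNEL_SHIFT  24
#define RAMHT_ENGINE_SHIFT   16

/* Valid bit | channel | engine | (instance address >> 4), as written
 * into the (co + 4) dword of the hash-table entry. */
static uint32_t ramht_ctx_pre_nv40(int channel, int engine,
                                   uint32_t instance)
{
    return RAMHT_VALID |
           (instance >> 4) |
           ((uint32_t)channel << RAMHT_CHANNEL_SHIFT) |
           ((uint32_t)engine << RAMHT_ENGINE_SHIFT);
}

int main(void)
{
    /* channel 1, engine 1 (GR), instance at byte offset 0x1180 */
    printf("0x%08x\n", ramht_ctx_pre_nv40(1, 1, 0x1180));
    return 0;
}
```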
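For cards below NV50, the linear ctxdma written by `nouveau_gpuobj_dma_new()` is four dwords, matching the layout comment in the file. A user-space sketch of the packing; the numeric `NV_DMA_ACCESS_*`/`NV_DMA_TARGET_*` values are assumptions (the real ones live in `nouveau_drv.h`), while the field positions and the `(1<<12)|(1<<13)` flags are taken as-is from the patch:

```c
#include <stdint.h>
#include <stdio.h>

#define NV_DMA_ACCESS_RO     0  /* assumed encoding; see nouveau_drv.h */
#define NV_DMA_ACCESS_RW     2  /* assumed encoding */
#define NV_DMA_TARGET_VIDMEM 0  /* "0 NV memory" per the layout comment */

/* Pack the four instance dwords of a linear <NV50 DMA object the way
 * the !is_scatter_gather branch above does. */
static void dma_object_pack(uint32_t obj[4], int class, uint32_t offset,
                            uint32_t size, int access, int target)
{
    uint32_t frame  = offset & ~0x00000fff;  /* page-aligned base */
    uint32_t adjust = offset &  0x00000fff;  /* bits 0-11 of address */
    uint32_t pte_flags = (access != NV_DMA_ACCESS_RO) ? (1u << 1) : 0;

    obj[0] = (1u << 12) | (1u << 13) |  /* flags used by the patch */
             (adjust << 20) |
             ((uint32_t)access << 14) |
             ((uint32_t)target << 16) |
             (uint32_t)class;
    obj[1] = size - 1;                  /* dma limit */
    obj[2] = frame | pte_flags;         /* first pte */
    obj[3] = frame | pte_flags;         /* terminator repeats the pte */
}

int main(void)
{
    uint32_t obj[4];
    int i;

    /* 64KiB of video memory at offset 0x100000, read/write */
    dma_object_pack(obj, 0x003d /* NV_CLASS_DMA_IN_MEMORY */,
                    0x100000, 0x10000,
                    NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM);
    for (i = 0; i < 4; i++)
        printf("entry[%d] = 0x%08x\n", i, obj[i]);
    return 0;
}
```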
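Finally, the scatter-gather path sizes its instance object as `(pages << 2) + 12` bytes: two header dwords (entry[0] and the dma limit), one 4-byte PTE per page, and one spare dword for the terminating PTE described in the comment block. A worked check of that arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Instance-memory footprint of a nonlinear ctxdma, as computed in the
 * nouveau_gpuobj_new() call inside nouveau_gpuobj_dma_new() above. */
static uint32_t sg_ctxdma_bytes(uint64_t size)
{
    uint64_t pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
    return (uint32_t)((pages << 2) + 12);
}

int main(void)
{
    /* 16KiB needs 4 PTEs: 8-byte header + 16 + 4-byte terminator = 28 */
    printf("%u\n", sg_ctxdma_bytes(16384));
    return 0;
}
```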