author     Ben Skeggs <skeggsb@gmail.com>    2007-07-15 17:18:15 +1000
committer  Ben Skeggs <skeggsb@gmail.com>    2007-07-17 13:51:14 +1000
commit     ec67c2def9af16bf9252d6742aec815b817f135a (patch)
tree       90a1645b3fe9cd2b7c7ea36b79b609aaf2f2aa4c
parent     70a8a60a3e81c18f9c6485102cb226c340c3cd73 (diff)
nouveau: G8x PCIEGART
Actually an NV04-NV50 TTM backend for both PCI and PCIEGART, but PCIGART
support for G8x using the current mm has been hacked on top of it.
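The backend below programs two different page-table-entry formats depending on
the card generation. As a rough orientation (an illustrative sketch, not code
from this commit; sgdma_write_pte() is a hypothetical helper and the plain
pointer write stands in for INSTANCE_WR()), binding one 4KiB page looks like:

	/* Sketch of the two PTE encodings nouveau_sgdma_bind() writes:
	 * pre-NV50 ctxdmas use one 32-bit word per page, the NV50 VM
	 * page table uses two 32-bit words per page. */
	static void sgdma_write_pte(uint32_t *pt, unsigned int pte,
				    uint64_t bus_addr, int is_nv50)
	{
		if (!is_nv50) {
			/* bus address | present/RW bits ("pteval | 3") */
			pt[pte] = (uint32_t)bus_addr | 3;
		} else {
			/* low word: bus address | 0x21 (entry present);
			 * high word: unused (zero) in this commit */
			pt[(pte << 1) + 0] = (uint32_t)bus_addr | 0x21;
			pt[(pte << 1) + 1] = 0x00000000;
		}
	}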
-rw-r--r--	linux-core/Makefile.kernel	|   1
-rw-r--r--	linux-core/nouveau_sgdma.c	| 318
-rw-r--r--	shared-core/nouveau_drv.h	|  35
-rw-r--r--	shared-core/nouveau_fifo.c	|  31
-rw-r--r--	shared-core/nouveau_mem.c	| 187
-rw-r--r--	shared-core/nouveau_notifier.c	|   3
-rw-r--r--	shared-core/nouveau_object.c	| 136
-rw-r--r--	shared-core/nouveau_state.c	|   9
-rw-r--r--	shared-core/nv50_graph.c	|   2

9 files changed, 588 insertions, 134 deletions
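For orientation before the diff itself: nouveau_sgdma.c implements the DRM TTM
backend interface, and until the new memory manager is used on these cards,
nouveau_sgdma_nottm_hack_init() drives it by hand. A minimal sketch of that
call sequence, under the assumption of the 2007 drm_ttm_backend API that the
diff itself uses (error handling omitted):

	/* Sketch: lifecycle of the SG DMA backend, mirroring
	 * nouveau_sgdma_nottm_hack_init() and nouveau_sgdma_clear(). */
	struct drm_ttm_backend *be = nouveau_sgdma_init_ttm(dev);

	be->func->populate(be, num_pages, pages); /* pci_map each page   */
	be->func->bind(be, 0, 0);    /* write PTEs from GART page 0 on   */
	/* ... GPU can now reach the pages through the GART aperture ... */
	be->func->unbind(be);        /* re-point PTEs at the dummy page  */
	be->func->clear(be);         /* pci_unmap + free the page list   */
	be->func->destroy(be);       /* free the backend itself          */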
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index be2641c8..5aa589cd 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -22,6 +22,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
 		i915_buffer.o
 nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
 		nouveau_object.o nouveau_irq.o nouveau_notifier.o \
+		nouveau_sgdma.o \
 		nv04_timer.o \
 		nv04_mc.o nv40_mc.o nv50_mc.o \
 		nv04_fb.o nv10_fb.o nv40_fb.o \
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
new file mode 100644
index 00000000..a65317cd
--- /dev/null
+++ b/linux-core/nouveau_sgdma.c
@@ -0,0 +1,318 @@
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+#define NV_CTXDMA_PAGE_SHIFT 12
+#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
+#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
+
+struct nouveau_sgdma_be {
+	struct drm_ttm_backend backend;
+	struct drm_device *dev;
+
+	int pages;
+	int pages_populated;
+	dma_addr_t *pagelist;
+	int is_bound;
+
+	unsigned int pte_start;
+};
+
+static int
+nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
+{
+	return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+static int
+nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
+		       struct page **pages)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	int p, d, o;
+
+	DRM_DEBUG("num_pages = %ld\n", num_pages);
+
+	if (nvbe->pagelist)
+		return DRM_ERR(EINVAL);
+	nvbe->pages    = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT;
+	nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t),
+				   DRM_MEM_PAGES);
+
+	nvbe->pages_populated = d = 0;
+	for (p = 0; p < num_pages; p++) {
+		for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
+			nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
+							 pages[p], o,
+							 NV_CTXDMA_PAGE_SIZE,
+							 PCI_DMA_BIDIRECTIONAL);
+			if (pci_dma_mapping_error(nvbe->pagelist[d])) {
+				be->func->clear(be);
+				DRM_ERROR("pci_map_page failed\n");
+				return DRM_ERR(EINVAL);
+			}
+			nvbe->pages_populated = ++d;
+		}
+	}
+
+	return 0;
+}
+
+static void
+nouveau_sgdma_clear(struct drm_ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	int d;
+
+	DRM_DEBUG("\n");
+
+	if (nvbe && nvbe->pagelist) {
+		if (nvbe->is_bound)
+			be->func->unbind(be);
+
+		for (d = 0; d < nvbe->pages_populated; d++) {
+			pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
+				       NV_CTXDMA_PAGE_SIZE,
+				       PCI_DMA_BIDIRECTIONAL);
+		}
+		drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t),
+			 DRM_MEM_PAGES);
+	}
+}
+
+static int
+nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start,
+		   int cached)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+	uint64_t offset = (pg_start << PAGE_SHIFT);
+	uint32_t i;
+
+	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached);
+
+	if (offset & NV_CTXDMA_PAGE_MASK)
+		return DRM_ERR(EINVAL);
+	nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT);
+	if (dev_priv->card_type < NV_50)
+		nvbe->pte_start += 2; /* skip ctxdma header */
+
+	for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) {
+		uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start];
+
+		if (pteval & NV_CTXDMA_PAGE_MASK) {
+			DRM_ERROR("Bad pteval 0x%llx\n", pteval);
+			return DRM_ERR(EINVAL);
+		}
+
+		if (dev_priv->card_type < NV_50) {
+			INSTANCE_WR(gpuobj, i, pteval | 3);
+		} else {
+			INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21);
+			INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000);
+		}
+	}
+
+	nvbe->is_bound = 1;
+	return 0;
+}
+
+static int
+nouveau_sgdma_unbind(struct drm_ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	DRM_DEBUG("\n");
+
+	if (nvbe->is_bound) {
+		struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+		unsigned int pte;
+
+		pte = nvbe->pte_start;
+		while (pte < (nvbe->pte_start + nvbe->pages)) {
+			uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;
+
+			if (dev_priv->card_type < NV_50) {
+				INSTANCE_WR(gpuobj, pte, pteval | 3);
+			} else {
+				INSTANCE_WR(gpuobj, (pte<<1)+0, 0x00000010);
+				INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000004);
+			}
+
+			pte++;
+		}
+
+		nvbe->is_bound = 0;
+	}
+
+	return 0;
+}
+
+static void
+nouveau_sgdma_destroy(struct drm_ttm_backend *be)
+{
+	DRM_DEBUG("\n");
+	if (be) {
+		struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+		if (nvbe) {
+			if (nvbe->pagelist)
+				be->func->clear(be);
+			drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM);
+		}
+	}
+}
+
+static struct drm_ttm_backend_func nouveau_sgdma_backend = {
+	.needs_ub_cache_adjust	= nouveau_sgdma_needs_ub_cache_adjust,
+	.populate		= nouveau_sgdma_populate,
+	.clear			= nouveau_sgdma_clear,
+	.bind			= nouveau_sgdma_bind,
+	.unbind			= nouveau_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
+struct drm_ttm_backend *
+nouveau_sgdma_init_ttm(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_sgdma_be *nvbe;
+
+	if (!dev_priv->gart_info.sg_ctxdma)
+		return NULL;
+
+	nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM);
+	if (!nvbe)
+		return NULL;
+
+	nvbe->dev = dev;
+
+	nvbe->backend.func     = &nouveau_sgdma_backend;
+	nvbe->backend.mem_type = DRM_BO_MEM_TT;
+
+	return &nvbe->backend;
+}
+
+int
+nouveau_sgdma_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *gpuobj = NULL;
+	uint32_t aper_size, obj_size;
+	int i, ret;
+
+	if (dev_priv->card_type < NV_50) {
+		aper_size = (64 * 1024 * 1024);
+		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
+		obj_size += 8; /* ctxdma header */
+	} else {
+		/* 1 entire VM page table */
+		aper_size = (512 * 1024 * 1024);
+		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
+	}
+
+	if ((ret = nouveau_gpuobj_new(dev, -1, obj_size, 16,
+				      NVOBJ_FLAG_ALLOW_NO_REFS |
+				      NVOBJ_FLAG_ZERO_ALLOC |
+				      NVOBJ_FLAG_ZERO_FREE, &gpuobj))) {
+		DRM_ERROR("Error creating sgdma object: %d\n", ret);
+		return ret;
+	}
+
+	if (dev_priv->card_type < NV_50) {
+		dev_priv->gart_info.sg_dummy_page =
+			alloc_page(GFP_KERNEL|__GFP_DMA32);
+		SetPageLocked(dev_priv->gart_info.sg_dummy_page);
+		dev_priv->gart_info.sg_dummy_bus =
+			pci_map_page(dev->pdev,
+				     dev_priv->gart_info.sg_dummy_page, 0,
+				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
+		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
+		 * on those cards? */
+		INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+				       (1 << 12) /* PT present */ |
+				       (0 << 13) /* PT *not* linear */ |
+				       (NV_DMA_ACCESS_RW  << 14) |
+				       (NV_DMA_TARGET_PCI << 16));
+		INSTANCE_WR(gpuobj, 1, aper_size - 1);
+		for (i=2; i<2+(aper_size>>12); i++) {
+			INSTANCE_WR(gpuobj, i,
+				    dev_priv->gart_info.sg_dummy_bus | 3);
+		}
+	} else {
+		for (i=0; i<obj_size; i+=8) {
+			INSTANCE_WR(gpuobj, (i+0)/4, 0); //x00000010);
+			INSTANCE_WR(gpuobj, (i+4)/4, 0); //0x00000004);
+		}
+	}
+
+	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
+	dev_priv->gart_info.aper_base = 0;
+	dev_priv->gart_info.aper_size = aper_size;
+	dev_priv->gart_info.sg_ctxdma = gpuobj;
+	return 0;
+}
+
+void
+nouveau_sgdma_takedown(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->gart_info.sg_dummy_page) {
+		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
+			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		unlock_page(dev_priv->gart_info.sg_dummy_page);
+		__free_page(dev_priv->gart_info.sg_dummy_page);
+		dev_priv->gart_info.sg_dummy_page = NULL;
+		dev_priv->gart_info.sg_dummy_bus  = 0;
+	}
+
+	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+}
+
+int
+nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_ttm_backend *be;
+	struct drm_scatter_gather sgreq;
+	int ret;
+
+	dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
+	if (!dev_priv->gart_info.sg_be)
+		return DRM_ERR(ENOMEM);
+	be = dev_priv->gart_info.sg_be;
+
+	/* Hack the aperture size down to the amount of system memory
+	 * we're going to bind into it.
+	 */
+	if (dev_priv->gart_info.aper_size > 32*1024*1024)
+		dev_priv->gart_info.aper_size = 32*1024*1024;
+
+	sgreq.size = dev_priv->gart_info.aper_size;
+	if ((ret = drm_sg_alloc(dev, &sgreq))) {
+		DRM_ERROR("drm_sg_alloc failed: %d\n", ret);
+		return ret;
+	}
+	dev_priv->gart_info.sg_handle = sgreq.handle;
+
+	if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) {
+		DRM_ERROR("failed populate: %d\n", ret);
+		return ret;
+	}
+
+	if ((ret = be->func->bind(be, 0, 0))) {
+		DRM_ERROR("failed bind: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void
+nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
+{
+}
diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h
index 4fa979e6..f68304c9 100644
--- a/shared-core/nouveau_drv.h
+++ b/shared-core/nouveau_drv.h
@@ -118,6 +118,10 @@ struct nouveau_fifo
 	struct nouveau_gpuobj_ref *ramin_grctx;
 	uint32_t pgraph_ctx [340]; /* XXX dynamic alloc ? */
 
+	/* NV50 VM */
+	struct nouveau_gpuobj     *vm_pd;
+	struct nouveau_gpuobj_ref *vm_gart_pt;
+
 	/* Objects */
 	struct nouveau_gpuobj_ref *ramin; /* Private instmem */
 	struct mem_block          *ramin_heap; /* Private PRAMIN heap */
@@ -220,8 +224,24 @@ struct drm_nouveau_private {
 	/* base physical adresses */
 	uint64_t fb_phys;
 	uint64_t fb_available_size;
-	uint64_t agp_phys;
-	uint64_t agp_available_size;
+
+	struct {
+		enum {
+			NOUVEAU_GART_NONE = 0,
+			NOUVEAU_GART_AGP,
+			NOUVEAU_GART_SGDMA
+		} type;
+		uint64_t aper_base;
+		uint64_t aper_size;
+
+		struct nouveau_gpuobj *sg_ctxdma;
+		struct page *sg_dummy_page;
+		dma_addr_t sg_dummy_bus;
+
+		/* nottm hack */
+		struct drm_ttm_backend *sg_be;
+		unsigned long sg_handle;
+	} gart_info;
 
 	/* the mtrr covering the FB */
 	int fb_mtrr;
@@ -307,6 +327,10 @@ extern int nouveau_gpuobj_dma_new(struct drm_device *, int channel, int class,
 				  uint64_t offset, uint64_t size, int access,
 				  int target, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gart_dma_new(struct drm_device *, int channel,
+				       uint64_t offset, uint64_t size,
+				       int access, struct nouveau_gpuobj **,
+				       uint32_t *o_ret);
 extern int nouveau_gpuobj_gr_new(struct drm_device *, int channel, int class,
 				 struct nouveau_gpuobj **);
 extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS);
@@ -317,6 +341,13 @@ extern void nouveau_irq_preinstall(struct drm_device*);
 extern void nouveau_irq_postinstall(struct drm_device*);
 extern void nouveau_irq_uninstall(struct drm_device*);
 
+/* nouveau_sgdma.c */
+extern int nouveau_sgdma_init(struct drm_device *);
+extern void nouveau_sgdma_takedown(struct drm_device *);
+extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern int nouveau_sgdma_nottm_hack_init(struct drm_device *);
+extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *);
+
 /* nv04_fb.c */
 extern int  nv04_fb_init(struct drm_device *dev);
 extern void nv04_fb_takedown(struct drm_device *dev);
diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c
index 56c25a6e..230c8298 100644
--- a/shared-core/nouveau_fifo.c
+++ b/shared-core/nouveau_fifo.c
@@ -211,24 +211,27 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel)
 	}
 
 	if (cb->flags & NOUVEAU_MEM_AGP) {
-		DRM_DEBUG("Creating CB in AGP memory\n");
+		ret = nouveau_gpuobj_gart_dma_new(dev, channel,
+						  cb->start, cb->size,
+						  NV_DMA_ACCESS_RO,
+						  &pushbuf,
+						  &chan->pushbuf_base);
+	} else
+	if (cb->flags & NOUVEAU_MEM_PCI) {
 		ret = nouveau_gpuobj_dma_new(dev, channel,
-				NV_CLASS_DMA_IN_MEMORY,
-				cb->start, cb->size,
-				NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP, &pushbuf);
-	} else if ( cb->flags & NOUVEAU_MEM_PCI) {
-		DRM_DEBUG("Creating CB in PCI memory\n");
-		ret = nouveau_gpuobj_dma_new(dev, channel,
-				NV_CLASS_DMA_IN_MEMORY,
-				cb->start,
-				cb->size,
-				NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI_NONLINEAR, &pushbuf);
+					     NV_CLASS_DMA_IN_MEMORY,
+					     cb->start, cb->size,
+					     NV_DMA_ACCESS_RO,
+					     NV_DMA_TARGET_PCI_NONLINEAR,
+					     &pushbuf);
+		chan->pushbuf_base = 0;
 	} else if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new
 			(dev, channel, NV_CLASS_DMA_IN_MEMORY,
 			 cb->start, cb->size, NV_DMA_ACCESS_RO,
 			 NV_DMA_TARGET_VIDMEM, &pushbuf);
+		chan->pushbuf_base = 0;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
 		 * exact reason for existing :)  PCI access to cmdbuf in
@@ -239,6 +242,7 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel)
 			 cb->start + drm_get_resource_start(dev, 1),
 			 cb->size, NV_DMA_ACCESS_RO,
 			 NV_DMA_TARGET_PCI, &pushbuf);
+		chan->pushbuf_base = 0;
 	}
 
 	if (ret) {
@@ -250,11 +254,12 @@ nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel)
 	if ((ret = nouveau_gpuobj_ref_add(dev, channel, 0, pushbuf,
 					  &chan->pushbuf))) {
 		DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
+		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
+			nouveau_gpuobj_del(dev, &pushbuf);
 		return ret;
 	}
 
-	dev_priv->fifos[channel]->pushbuf_base = 0;
-	dev_priv->fifos[channel]->pushbuf_mem = cb;
+	chan->pushbuf_mem = cb;
 	return 0;
 }
diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c
index e5906867..7a923e17 100644
--- a/shared-core/nouveau_mem.c
+++ b/shared-core/nouveau_mem.c
@@ -209,12 +209,11 @@ void nouveau_mem_takedown(struct mem_block **heap)
 void nouveau_mem_close(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
+	nouveau_mem_takedown(&dev_priv->agp_heap);
 	nouveau_mem_takedown(&dev_priv->fb_heap);
-	if ( dev_priv->pci_heap )
-	{
+	if (dev_priv->pci_heap)
 		nouveau_mem_takedown(&dev_priv->pci_heap);
-	}
 }
 
 /* returns the amount of FB ram in bytes */
@@ -282,93 +281,68 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
 	return 0;
 }
 
-
-
-int nouveau_mem_init(struct drm_device *dev)
+static int
+nouveau_mem_init_agp(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t fb_size;
-	struct drm_scatter_gather sgreq;
-	dev_priv->agp_phys=0;
-	dev_priv->fb_phys=0;
-	sgreq . size = 4 << 20; //4MB of PCI scatter-gather zone
-
-	/* init AGP */
-	dev_priv->agp_heap=NULL;
-	if (drm_device_is_agp(dev))
-	{
-		int err;
-		struct drm_agp_info info;
-		struct drm_agp_mode mode;
-		struct drm_agp_buffer agp_req;
-		struct drm_agp_binding bind_req;
-
-		err = drm_agp_acquire(dev);
-		if (err) {
-			DRM_ERROR("Unable to acquire AGP: %d\n", err);
-			goto no_agp;
-		}
-
-		err = drm_agp_info(dev, &info);
-		if (err) {
-			DRM_ERROR("Unable to get AGP info: %d\n", err);
-			goto no_agp;
-		}
-
-		/* see agp.h for the AGPSTAT_* modes available */
-		mode.mode = info.mode;
-		err = drm_agp_enable(dev, mode);
-		if (err) {
-			DRM_ERROR("Unable to enable AGP: %d\n", err);
-			goto no_agp;
-		}
-
-		agp_req.size = info.aperture_size;
-		agp_req.type = 0;
-		err = drm_agp_alloc(dev, &agp_req);
-		if (err) {
-			DRM_ERROR("Unable to alloc AGP: %d\n", err);
-			goto no_agp;
-		}
+	struct drm_agp_info info;
+	struct drm_agp_mode mode;
+	struct drm_agp_buffer agp_req;
+	struct drm_agp_binding bind_req;
+	int ret;
+
+	ret = drm_agp_acquire(dev);
+	if (ret) {
+		DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+		return ret;
+	}
 
-		bind_req.handle = agp_req.handle;
-		bind_req.offset = 0;
-		err = drm_agp_bind(dev, &bind_req);
-		if (err) {
-			DRM_ERROR("Unable to bind AGP: %d\n", err);
-			goto no_agp;
-		}
+	ret = drm_agp_info(dev, &info);
+	if (ret) {
+		DRM_ERROR("Unable to get AGP info: %d\n", ret);
+		return ret;
+	}
 
-		if (nouveau_mem_init_heap(&dev_priv->agp_heap,
-					  0, info.aperture_size))
-			goto no_agp;
+	/* see agp.h for the AGPSTAT_* modes available */
+	mode.mode = info.mode;
+	ret = drm_agp_enable(dev, mode);
+	if (ret) {
+		DRM_ERROR("Unable to enable AGP: %d\n", ret);
+		return ret;
+	}
 
-		dev_priv->agp_phys		= info.aperture_base;
-		dev_priv->agp_available_size	= info.aperture_size;
-		goto have_agp;
+	agp_req.size = info.aperture_size;
+	agp_req.type = 0;
+	ret = drm_agp_alloc(dev, &agp_req);
+	if (ret) {
+		DRM_ERROR("Unable to alloc AGP: %d\n", ret);
+		return ret;
 	}
 
-no_agp:
+	bind_req.handle = agp_req.handle;
+	bind_req.offset = 0;
+	ret = drm_agp_bind(dev, &bind_req);
+	if (ret) {
+		DRM_ERROR("Unable to bind AGP: %d\n", ret);
+		return ret;
+	}
 
-	if ( dev_priv->card_type >= NV_50 ) goto no_pci;
+	dev_priv->gart_info.type      = NOUVEAU_GART_AGP;
+	dev_priv->gart_info.aper_base = info.aperture_base;
+	dev_priv->gart_info.aper_size = info.aperture_size;
+	return 0;
+}
 
-	dev_priv->pci_heap = NULL;
-	DRM_DEBUG("Allocating sg memory for PCI DMA\n");
-	if ( drm_sg_alloc(dev, &sgreq) )
-	{
-		DRM_ERROR("Unable to allocate 4MB of scatter-gather pages for PCI DMA!");
-		goto no_pci;
-	}
+int nouveau_mem_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t fb_size;
+	int ret = 0;
 
-	if ( nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
-				   dev->sg->pages * PAGE_SIZE))
-	{
-		DRM_ERROR("Unable to initialize pci_heap!");
-		goto no_pci;
-	}
+	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
+	dev_priv->fb_phys = 0;
+	dev_priv->gart_info.type = NOUVEAU_GART_NONE;
 
-no_pci:
-have_agp:
 	/* setup a mtrr over the FB */
 	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
 					 nouveau_mem_fb_amount(dev),
@@ -399,6 +373,54 @@ have_agp:
 		dev_priv->fb_nomap_heap=NULL;
 	}
 
+	/* Init AGP / NV50 PCIEGART */
+	if (drm_device_is_agp(dev) && dev->agp) {
+		if ((ret = nouveau_mem_init_agp(dev)))
+			DRM_ERROR("Error initialising AGP: %d\n", ret);
+	}
+
+	/*Note: this is *not* just NV50 code, but only used on NV50 for now */
+	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
+	    dev_priv->card_type >= NV_50) {
+		ret = nouveau_sgdma_init(dev);
+		if (!ret) {
+			ret = nouveau_sgdma_nottm_hack_init(dev);
+			if (ret)
+				nouveau_sgdma_takedown(dev);
+		}
+
+		if (ret)
+			DRM_ERROR("Error initialising SG DMA: %d\n", ret);
+	}
+
+	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
+		if (nouveau_mem_init_heap(&dev_priv->agp_heap,
+					  0, dev_priv->gart_info.aper_size)) {
+			if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
+				nouveau_sgdma_nottm_hack_takedown(dev);
+				nouveau_sgdma_takedown(dev);
+			}
+		}
+	}
+
+	/* NV04-NV40 PCIEGART */
+	if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
+		struct drm_scatter_gather sgreq;
+
+		DRM_DEBUG("Allocating sg memory for PCI DMA\n");
+		sgreq.size = 4 << 20; //4MB of PCI scatter-gather zone
+
+		if (drm_sg_alloc(dev, &sgreq)) {
+			DRM_ERROR("Unable to allocate 4MB of scatter-gather"
+				  " pages for PCI DMA!");
+		} else {
+			if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
+						  dev->sg->pages * PAGE_SIZE)) {
+				DRM_ERROR("Unable to initialize pci_heap!");
+			}
+		}
+	}
+
 	return 0;
 }
 
@@ -473,9 +495,14 @@ alloc_ok:
 		int ret = 0;
 		block->flags|=NOUVEAU_MEM_MAPPED;
 
-		if (type == NOUVEAU_MEM_AGP)
+		if (type == NOUVEAU_MEM_AGP) {
+			if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
 			ret = drm_addmap(dev, block->start, block->size,
 					 _DRM_AGP, 0, &block->map);
+			else
+			ret = drm_addmap(dev, block->start, block->size,
+					 _DRM_SCATTER_GATHER, 0, &block->map);
+		}
 		else if (type == NOUVEAU_MEM_FB)
 			ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
 					 block->size, _DRM_FRAME_BUFFER,
diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c
index 36dba654..238e3c8b 100644
--- a/shared-core/nouveau_notifier.c
+++ b/shared-core/nouveau_notifier.c
@@ -37,7 +37,8 @@ nouveau_notifier_init_channel(struct drm_device *dev, int channel, DRMFILE filp)
 	int flags, ret;
 
 	/*TODO: PCI notifier blocks */
-	if (dev_priv->agp_heap)
+	if (dev_priv->agp_heap &&
+	    dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
 		flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE;
 	else
 		flags = NOUVEAU_MEM_FB;
diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c
index ea0edb08..f0025d7a 100644
--- a/shared-core/nouveau_object.c
+++ b/shared-core/nouveau_object.c
@@ -596,7 +596,7 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class,
 
 	switch (target) {
 	case NV_DMA_TARGET_AGP:
-		offset += dev_priv->agp_phys;
+		offset += dev_priv->gart_info.aper_base;
 		break;
 	case NV_DMA_TARGET_PCI_NONLINEAR:
 		/*assume the "offset" is a virtual memory address*/
@@ -689,10 +689,20 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class,
 			}
 		}
 	} else {
-		INSTANCE_WR(*gpuobj, 0, 0x00190000 | class);
+		uint32_t flags0, flags5;
+
+		if (target == NV_DMA_TARGET_VIDMEM) {
+			flags0 = 0x00190000;
+			flags5 = 0x00010000;
+		} else {
+			flags0 = 0x7fc00000;
+			flags5 = 0x00080000;
+		}
+
+		INSTANCE_WR(*gpuobj, 0, flags0 | class);
 		INSTANCE_WR(*gpuobj, 1, offset + size - 1);
 		INSTANCE_WR(*gpuobj, 2, offset);
-		INSTANCE_WR(*gpuobj, 5, 0x00010000);
+		INSTANCE_WR(*gpuobj, 5, flags5);
 	}
 
 	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
@@ -700,6 +710,42 @@ nouveau_gpuobj_dma_new(struct drm_device *dev, int channel, int class,
 	return 0;
 }
 
+int
+nouveau_gpuobj_gart_dma_new(struct drm_device *dev, int channel,
+			    uint64_t offset, uint64_t size, int access,
+			    struct nouveau_gpuobj **gpuobj,
+			    uint32_t *o_ret)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
+	    (dev_priv->card_type >= NV_50 &&
+	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
+		ret = nouveau_gpuobj_dma_new(dev, channel,
+					     NV_CLASS_DMA_IN_MEMORY,
+					     offset, size, access,
+					     NV_DMA_TARGET_AGP, gpuobj);
+		if (o_ret)
+			*o_ret = 0;
+	} else
+	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
+		*gpuobj = dev_priv->gart_info.sg_ctxdma;
+		if (offset & ~0xffffffffULL) {
+			DRM_ERROR("obj offset exceeds 32-bits\n");
+			return DRM_ERR(EINVAL);
+		}
+		if (o_ret)
+			*o_ret = (uint32_t)offset;
+		ret = (*gpuobj != NULL) ? 0 : DRM_ERR(EINVAL);
+	} else {
+		DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
+		return DRM_ERR(EINVAL);
+	}
+
+	return ret;
+}
+
 /* Context objects in the instance RAM have the following structure.
  * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
@@ -857,7 +903,7 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo *chan = dev_priv->fifos[channel];
 	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
-	int ret;
+	int ret, i;
 
 	DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", channel, vram_h, tt_h);
 
@@ -870,6 +916,29 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
 		return ret;
 	}
 
+	/* NV50 VM, point offset 0-512MiB at shared PCIEGART table */
+	if (dev_priv->card_type >= NV_50) {
+		uint32_t vm_offset;
+
+		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
+		vm_offset += chan->ramin->gpuobj->im_pramin->start;
+		if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, 0x4000,
+						   0, &chan->vm_pd, NULL)))
+			return ret;
+		for (i=0; i<0x4000; i+=8) {
+			INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000);
+			INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe);
+		}
+
+		if ((ret = nouveau_gpuobj_ref_add(dev, -1, 0,
+						  dev_priv->gart_info.sg_ctxdma,
+						  &chan->vm_gart_pt)))
+			return ret;
+		INSTANCE_WR(chan->vm_pd, (0+0)/4,
+			    chan->vm_gart_pt->instance | 0x03);
+		INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000);
+	}
+
 	/* RAMHT */
 	if (dev_priv->card_type < NV_50) {
 		ret = nouveau_gpuobj_ref_add(dev, -1, 0, dev_priv->ramht,
@@ -899,40 +968,34 @@ nouveau_gpuobj_channel_init(struct drm_device *dev, int channel,
 		return ret;
 	}
 
-	if (dev_priv->agp_heap) {
-		/* AGPGART ctxdma */
-		if ((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
-						  0, dev_priv->agp_available_size,
-						  NV_DMA_ACCESS_RW,
-						  NV_DMA_TARGET_AGP, &tt))) {
-			DRM_ERROR("Error creating AGP TT ctxdma: %d\n", DRM_ERR(ENOMEM));
-			return DRM_ERR(ENOMEM);
-		}
-
-		ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL);
-		if (ret) {
-			DRM_ERROR("Error referencing AGP TT ctxdma: %d\n", ret);
-			return ret;
-		}
+	/* TT memory ctxdma */
+	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
+		ret = nouveau_gpuobj_gart_dma_new(dev, channel, 0,
+						  dev_priv->gart_info.aper_size,
+						  NV_DMA_ACCESS_RW, &tt, NULL);
+	} else
+	if (dev_priv->pci_heap) {
+		ret = nouveau_gpuobj_dma_new(dev, channel,
+					     NV_CLASS_DMA_IN_MEMORY,
+					     0, dev->sg->pages * PAGE_SIZE,
+					     NV_DMA_ACCESS_RW,
+					     NV_DMA_TARGET_PCI_NONLINEAR, &tt);
+	} else {
+		DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
+		ret = DRM_ERR(EINVAL);
 	}
-	else if ( dev_priv->pci_heap) {
-		if (dev_priv -> card_type >= NV_50 ) return 0; /*no PCIGART for NV50*/
-		/*PCI*/
-		if((ret = nouveau_gpuobj_dma_new(dev, channel, NV_CLASS_DMA_IN_MEMORY,
-						 0, dev->sg->pages * PAGE_SIZE,
-						 NV_DMA_ACCESS_RW,
-						 NV_DMA_TARGET_PCI_NONLINEAR, &tt))) {
-			DRM_ERROR("Error creating PCI TT ctxdma: %d\n", DRM_ERR(ENOMEM));
-			return 0; //this is noncritical
-		}
-
-		ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL);
-		if (ret) {
-			DRM_ERROR("Error referencing PCI TT ctxdma: %d\n", ret);
-			return ret;
-		}
+
+	if (ret) {
+		DRM_ERROR("Error creating TT ctxdma: %d\n", ret);
+		return ret;
+	}
+
+	ret = nouveau_gpuobj_ref_add(dev, channel, tt_h, tt, NULL);
+	if (ret) {
+		DRM_ERROR("Error referencing TT ctxdma: %d\n", ret);
+		return ret;
 	}
 
 	return 0;
 }
 
@@ -951,6 +1014,9 @@ nouveau_gpuobj_channel_takedown(struct drm_device *dev, int channel)
 	}
 	nouveau_gpuobj_ref_del(dev, &chan->ramht);
 
+	nouveau_gpuobj_del(dev, &chan->vm_pd);
+	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
+
 	if (chan->ramin_heap)
 		nouveau_mem_takedown(&chan->ramin_heap);
 	if (chan->ramin)
diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c
index 69e9c221..4e3b39dd 100644
--- a/shared-core/nouveau_state.c
+++ b/shared-core/nouveau_state.c
@@ -332,7 +332,12 @@ static void nouveau_card_takedown(struct drm_device *dev)
 		engine->fb.takedown(dev);
 		engine->timer.takedown(dev);
 		engine->mc.takedown(dev);
+
+		nouveau_sgdma_nottm_hack_takedown(dev);
+		nouveau_sgdma_takedown(dev);
+
 		nouveau_gpuobj_takedown(dev);
+
 		nouveau_mem_close(dev);
 		engine->instmem.takedown(dev);
@@ -442,7 +447,7 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS)
 		getparam.value=dev_priv->fb_phys;
 		break;
 	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
-		getparam.value=dev_priv->agp_phys;
+		getparam.value=dev_priv->gart_info.aper_base;
 		break;
 	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
 		if ( dev -> sg )
@@ -457,7 +462,7 @@ int nouveau_ioctl_getparam(DRM_IOCTL_ARGS)
 		getparam.value=dev_priv->fb_available_size;
 		break;
 	case NOUVEAU_GETPARAM_AGP_SIZE:
-		getparam.value=dev_priv->agp_available_size;
+		getparam.value=dev_priv->gart_info.aper_size;
 		break;
 	default:
 		DRM_ERROR("unknown parameter %lld\n", getparam.param);
diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c
index 54fe498b..6a04c158 100644
--- a/shared-core/nv50_graph.c
+++ b/shared-core/nv50_graph.c
@@ -271,7 +271,7 @@ nv50_graph_load_context(struct drm_device *dev, int channel)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo *chan = dev_priv->fifos[channel];
 	uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31));
-	int ret;
+	int ret; (void)ret;
 
 	DRM_DEBUG("ch%d\n", channel);
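As a closing sanity check, the page-table sizing chosen in nouveau_sgdma_init()
above works out as follows (plain arithmetic spelled out from the diff, not
code from the commit):

	/* Pre-NV50: 64MiB aperture, 4-byte PTEs, two-word ctxdma header:
	 *   obj_size = (64MiB >> 12) * 4 + 8 = 16384 * 4 + 8 = 65544 bytes
	 * (nouveau_sgdma_bind() skips the header by adding 2 to pte_start.)
	 *
	 * NV50: 512MiB aperture, 8-byte PTEs, one entire VM page table:
	 *   obj_size = (512MiB >> 12) * 8 = 131072 * 8 = 1MiB
	 * matching the 0-512MiB of channel address space that PDE 0 of each
	 * channel's vm_pd points at the shared GART page table. */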