From e5d3c7f260d18168eec755c73f01ac617390d96c Mon Sep 17 00:00:00 2001
From: Michel Daenzer
Date: Fri, 16 May 2003 23:41:27 +0000
Subject: Support AGP bridges where the AGP aperture can't be accessed directly
 by the CPU (David Mosberger, Benjamin Herrenschmidt, myself, Paul Mackerras,
 Jeff Wiedemeier)
---
 linux-core/drmP.h             |  23 +++---
 linux-core/drm_bufs.c         |   4 +-
 linux-core/drm_drv.c          |   2 +-
 linux-core/drm_memory.h       | 165 ++++++++++++++++++++++++++++++++++++++++--
 linux-core/drm_memory_debug.h |  12 +--
 linux-core/drm_vm.c           |  41 +++++++----
 linux-core/i810_dma.c         |   8 +-
 linux-core/i830_dma.c         |   8 +-
 8 files changed, 215 insertions(+), 48 deletions(-)

diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 43d3776f..1f0ef128 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -251,16 +251,16 @@ static inline struct page * vmalloc_to_page(void * vmalloc_addr)
         if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
 
 /* Mapping helper macros */
-#define DRM_IOREMAP(map) \
-        (map)->handle = DRM(ioremap)( (map)->offset, (map)->size )
+#define DRM_IOREMAP(map, dev) \
+        (map)->handle = DRM(ioremap)( (map)->offset, (map)->size, (dev) )
 
-#define DRM_IOREMAP_NOCACHE(map) \
-        (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size)
+#define DRM_IOREMAP_NOCACHE(map, dev) \
+        (map)->handle = DRM(ioremap_nocache)((map)->offset, (map)->size, (dev))
 
-#define DRM_IOREMAPFREE(map) \
-        do { \
-                if ( (map)->handle && (map)->size ) \
-                        DRM(ioremapfree)( (map)->handle, (map)->size ); \
+#define DRM_IOREMAPFREE(map, dev) \
+        do { \
+                if ( (map)->handle && (map)->size ) \
+                        DRM(ioremapfree)( (map)->handle, (map)->size, (dev) ); \
         } while (0)
 
 #define DRM_FIND_MAP(_map, _o) \
@@ -682,9 +682,10 @@ extern void DRM(free)(void *pt, size_t size, int area);
 extern unsigned long DRM(alloc_pages)(int order, int area);
 extern void DRM(free_pages)(unsigned long address, int order, int area);
 
-extern void *DRM(ioremap)(unsigned long offset, unsigned long size);
-extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size);
-extern void DRM(ioremapfree)(void *pt, unsigned long size);
+extern void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev);
+extern void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size,
+                                  drm_device_t *dev);
+extern void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev);
 
 #if __REALLY_HAVE_AGP
 extern agp_memory *DRM(alloc_agp)(int pages, u32 type);
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index 84a9a611..6943eb3d 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -124,7 +124,7 @@ int DRM(addmap)( struct inode *inode, struct file *filp,
                                        MTRR_TYPE_WRCOMB, 1 );
                 }
 #endif
-                map->handle = DRM(ioremap)( map->offset, map->size );
+                map->handle = DRM(ioremap)( map->offset, map->size, dev );
                 break;
 
         case _DRM_SHM:
@@ -246,7 +246,7 @@ int DRM(rmmap)(struct inode *inode, struct file *filp,
                         DRM_DEBUG("mtrr_del = %d\n", retcode);
                 }
 #endif
-                DRM(ioremapfree)(map->handle, map->size);
+                DRM(ioremapfree)(map->handle, map->size, dev);
                 break;
         case _DRM_SHM:
                 vfree(map->handle);
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 2d1a9448..0a4390d3 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -454,7 +454,7 @@ static int DRM(takedown)( drm_device_t *dev )
                                 DRM_DEBUG( "mtrr_del=%d\n", retcode );
                         }
 #endif
-                        DRM(ioremapfree)( map->handle, map->size );
+                        DRM(ioremapfree)( map->handle, map->size, dev );
                         break;
                 case _DRM_SHM:
                         vfree(map->handle);
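For callers, the visible change in the hunks above is the extra drm_device_t argument: DRM(ioremap), DRM(ioremap_nocache), DRM(ioremapfree) and the DRM_IOREMAP* macros now receive the device, so the core can notice a bridge with cant_use_aperture set and build the CPU-visible mapping itself rather than trusting ioremap() on the aperture's bus address. A minimal sketch of the updated calling pattern follows; it is illustrative only (the example_* helpers are hypothetical and not part of this patch):

/* Sketch: map and release a register/framebuffer map through the
 * three-argument interface introduced above.  Error handling is minimal. */
static int example_map(drm_device_t *dev, drm_map_t *map)
{
        map->handle = DRM(ioremap)(map->offset, map->size, dev);
        if (!map->handle)
                return -ENOMEM;
        return 0;
}

static void example_unmap(drm_device_t *dev, drm_map_t *map)
{
        if (map->handle && map->size)
                DRM(ioremapfree)(map->handle, map->size, dev);
}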
diff --git a/linux-core/drm_memory.h b/linux-core/drm_memory.h
index ae5737f1..5f111459 100644
--- a/linux-core/drm_memory.h
+++ b/linux-core/drm_memory.h
@@ -39,6 +39,159 @@
  */
 
 #define DEBUG_MEMORY 0
 
+/* Need the 4-argument version of vmap().  */
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+
+#include <linux/vmalloc.h>
+
+#ifdef HAVE_PAGE_AGP
+#include <asm/agp.h>
+#else
+# ifdef __powerpc__
+#  define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# else
+#  define PAGE_AGP PAGE_KERNEL
+# endif
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+# include <asm/tlbflush.h>
+#else
+# define pte_offset_kernel(dir, address) pte_offset(dir, address)
+# define pte_pfn(pte)                    (pte_page(pte) - mem_map)
+# define pfn_to_page(pfn)                (mem_map + (pfn))
+# define flush_tlb_kernel_range(s,e)     flush_tlb_all()
+#endif
+
+/*
+ * Find the drm_map that covers the range [offset, offset+size).
+ */
+static inline drm_map_t *
+drm_lookup_map (unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+        struct list_head *list;
+        drm_map_list_t *r_list;
+        drm_map_t *map;
+
+        list_for_each(list, &dev->maplist->head) {
+                r_list = (drm_map_list_t *) list;
+                map = r_list->map;
+                if (!map)
+                        continue;
+                if (map->offset <= offset && (offset + size) <= (map->offset + map->size))
+                        return map;
+        }
+        return NULL;
+}
+
+static inline void *
+agp_remap (unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+        unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE;
+        struct drm_agp_mem *agpmem;
+        struct page **page_map;
+        void *addr;
+
+        size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+        offset -= dev->hose->mem_space->start;
+#endif
+
+        for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+                if (agpmem->bound <= offset
+                    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >= (offset + size))
+                        break;
+        if (!agpmem)
+                return NULL;
+
+        /*
+         * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+         * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+         * page-table instead (that's probably faster anyhow...).
+         */
+        /* note: use vmalloc() because num_pages could be large... */
+        page_map = vmalloc(num_pages * sizeof(struct page *));
+        if (!page_map)
+                return NULL;
+
+        phys_addr_map = agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+        for (i = 0; i < num_pages; ++i)
+                page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+        addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+        vfree(page_map);
+        if (!addr)
+                return NULL;
+
+        flush_tlb_kernel_range((unsigned long) addr, (unsigned long) addr + size);
+        return addr;
+}
+
+static inline unsigned long
+drm_follow_page (void *vaddr)
+{
+        pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
+        pmd_t *pmd = pmd_offset(pgd, (unsigned long) vaddr);
+        pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
+        return pte_pfn(*ptep) << PAGE_SHIFT;
+}
+
+#endif /* __REALLY_HAVE_AGP && defined(VMAP_4_ARGS) */
+
+static inline void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t *dev)
+{
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+        if (dev->agp && dev->agp->cant_use_aperture) {
+                drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+                if (map && map->type == _DRM_AGP)
+                        return agp_remap(offset, size, dev);
+        }
+#endif
+
+        return ioremap(offset, size);
+}
+
+static inline void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
+                                        drm_device_t *dev)
+{
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+        if (dev->agp && dev->agp->cant_use_aperture) {
+                drm_map_t *map = drm_lookup_map(offset, size, dev);
+
+                if (map && map->type == _DRM_AGP)
+                        return agp_remap(offset, size, dev);
+        }
+#endif
+
+        return ioremap_nocache(offset, size);
+}
+
+static inline void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev)
+{
+#if __REALLY_HAVE_AGP && defined(VMAP_4_ARGS)
+        /*
+         * This is a bit ugly.  It would be much cleaner if the DRM API would use separate
+         * routines for handling mappings in the AGP space.  Hopefully this can be done in
+         * a future revision of the interface...
+         */
+        if (dev->agp && dev->agp->cant_use_aperture
+            && ((unsigned long) pt >= VMALLOC_START && (unsigned long) pt < VMALLOC_END))
+        {
+                unsigned long offset;
+                drm_map_t *map;
+
+                offset = drm_follow_page(pt) | ((unsigned long) pt & ~PAGE_MASK);
+                map = drm_lookup_map(offset, size, dev);
+                if (map && map->type == _DRM_AGP) {
+                        vunmap(pt);
+                        return;
+                }
+        }
+#endif
+
+        iounmap(pt);
+}
 
 #if DEBUG_MEMORY
 #include "drm_memory_debug.h"
@@ -119,19 +272,19 @@ void DRM(free_pages)(unsigned long address, int order, int area)
         free_pages(address, order);
 }
 
-void *DRM(ioremap)(unsigned long offset, unsigned long size)
+void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
-        return ioremap(offset, size);
+        return drm_ioremap(offset, size, dev);
 }
 
-void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
+void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
-        return ioremap_nocache(offset, size);
+        return drm_ioremap_nocache(offset, size, dev);
 }
 
-void DRM(ioremapfree)(void *pt, unsigned long size)
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
 {
-        iounmap(pt);
+        drm_ioremapfree(pt, size, dev);
 }
 
 #if __REALLY_HAVE_AGP
diff --git a/linux-core/drm_memory_debug.h b/linux-core/drm_memory_debug.h
index f4fd2425..d74267fb 100644
--- a/linux-core/drm_memory_debug.h
+++ b/linux-core/drm_memory_debug.h
@@ -270,7 +270,7 @@ void DRM(free_pages)(unsigned long address, int order, int area)
         }
 }
 
-void *DRM(ioremap)(unsigned long offset, unsigned long size)
+void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
         void *pt;
 
@@ -280,7 +280,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size)
                 return NULL;
         }
 
-        if (!(pt = ioremap(offset, size))) {
+        if (!(pt = drm_ioremap(offset, size, dev))) {
                 spin_lock(&DRM(mem_lock));
                 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
                 spin_unlock(&DRM(mem_lock));
@@ -293,7 +293,7 @@ void *DRM(ioremap)(unsigned long offset, unsigned long size)
         return pt;
 }
 
-void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
+void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size, drm_device_t *dev)
 {
         void *pt;
 
@@ -303,7 +303,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
                 return NULL;
         }
 
-        if (!(pt = ioremap_nocache(offset, size))) {
+        if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
                 spin_lock(&DRM(mem_lock));
                 ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
                 spin_unlock(&DRM(mem_lock));
@@ -316,7 +316,7 @@ void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size)
         return pt;
 }
 
-void DRM(ioremapfree)(void *pt, unsigned long size)
+void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
 {
         int alloc_count;
         int free_count;
@@ -325,7 +325,7 @@ void DRM(ioremapfree)(void *pt, unsigned long size)
                 DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                               "Attempt to free NULL pointer\n");
         else
-                iounmap(pt);
+                drm_ioremapfree(pt, size, dev);
 
         spin_lock(&DRM(mem_lock));
         DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
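The agp_remap() helper added above hinges on the four-argument form of vmap() (hence the VMAP_4_ARGS guard): once the struct page pointers backing the AGP memory have been collected, a regular kernel virtual mapping is built with the caching attributes given by PAGE_AGP and later torn down with vunmap(). A minimal sketch of that core step, assuming the page array and count are already filled in (illustrative only, not part of the patch):

/* Sketch: the vmap()/vunmap() pairing agp_remap() relies on.  'pages' and
 * 'count' are assumed to describe the AGP backing pages; PAGE_AGP requests
 * an uncached mapping on platforms that need one. */
static void *example_map_agp_pages(struct page **pages, unsigned int count)
{
        return vmap(pages, count, VM_IOREMAP, PAGE_AGP);
}

static void example_unmap_agp_pages(void *addr)
{
        vunmap(addr);
}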
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 76a10cf5..59b19da3 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -108,12 +108,12 @@ struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                  * Get the page, inc the use count, and return it
                  */
                 offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
-                agpmem->memory->memory[offset] &= dev->agp->page_mask;
                 page = virt_to_page(__va(agpmem->memory->memory[offset]));
                 get_page(page);
 
-                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
-                          baddr, __va(agpmem->memory->memory[offset]), offset);
+                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+                          baddr, __va(agpmem->memory->memory[offset]), offset,
+                          atomic_read(&page->count));
 
                 return page;
         }
@@ -207,7 +207,7 @@ void DRM(vm_shm_close)(struct vm_area_struct *vma)
                         DRM_DEBUG("mtrr_del = %d\n", retcode);
                 }
 #endif
-                DRM(ioremapfree)(map->handle, map->size);
+                DRM(ioremapfree)(map->handle, map->size, dev);
                 break;
         case _DRM_SHM:
                 vfree(map->handle);
@@ -381,7 +381,16 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
 
         if ( !priv->authenticated ) return -EACCES;
 
-        if (!VM_OFFSET(vma)) return DRM(mmap_dma)(filp, vma);
+        /* We check for "dma". On Apple's UniNorth, it's valid to have
+         * the AGP mapped at physical address 0
+         * --BenH.
+         */
+        if (!VM_OFFSET(vma)
+#if __REALLY_HAVE_AGP
+            && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+#endif
+            )
+                return DRM(mmap_dma)(filp, vma);
 
         /* A sequential search of a linked list is
            fine here because: 1) there will only be
@@ -421,15 +430,19 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
 
         switch (map->type) {
         case _DRM_AGP:
-#if defined(__alpha__)
+#if __REALLY_HAVE_AGP
+          if (dev->agp->cant_use_aperture) {
                 /*
-                 * On Alpha we can't talk to bus dma address from the
-                 * CPU, so for memory of type DRM_AGP, we'll deal with
-                 * sorting out the real physical pages and mappings
-                 * in nopage()
+                 * On some platforms we can't talk to bus dma address from the CPU, so for
+                 * memory of type DRM_AGP, we'll deal with sorting out the real physical
+                 * pages and mappings in nopage()
                  */
+#if defined(__powerpc__)
+                pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+#endif
                 vma->vm_ops = &DRM(vm_ops);
                 break;
+          }
 #endif
                 /* fall through to _DRM_FRAME_BUFFER... */
         case _DRM_FRAME_BUFFER:
@@ -440,15 +453,15 @@ int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
                         pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                         pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                 }
-#elif defined(__ia64__)
-                if (map->type != _DRM_AGP)
-                        vma->vm_page_prot =
-                                pgprot_writecombine(vma->vm_page_prot);
 #elif defined(__powerpc__)
                 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
 #endif
                 vma->vm_flags |= VM_IO; /* not in core dump */
         }
+#if defined(__ia64__)
+        if (map->type != _DRM_AGP)
+                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+#endif
         offset = DRIVER_GET_REG_OFS();
 #ifdef __sparc__
         if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index 8e0e8f45..678610c6 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -253,7 +253,7 @@ int i810_dma_cleanup(drm_device_t *dev)
 
         if(dev_priv->ring.virtual_start) {
                 DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-                                 dev_priv->ring.Size);
+                                 dev_priv->ring.Size, dev);
         }
         if (dev_priv->hw_status_page) {
                 pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -270,7 +270,7 @@ int i810_dma_cleanup(drm_device_t *dev)
                         drm_buf_t *buf = dma->buflist[ i ];
                         drm_i810_buf_priv_t *buf_priv = buf->dev_private;
                         if ( buf_priv->kernel_virtual && buf->total )
-                                DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+                                DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
                 }
         }
         return 0;
@@ -340,7 +340,7 @@ static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
                 *buf_priv->in_use = I810_BUF_FREE;
 
                 buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
-                                                        buf->total);
+                                                        buf->total, dev);
         }
         return 0;
 }
@@ -393,7 +393,7 @@ static int i810_dma_initialize(drm_device_t *dev,
 
         dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
                                                     init->ring_start,
-                                                    init->ring_size);
+                                                    init->ring_size, dev);
 
         if (dev_priv->ring.virtual_start == NULL) {
                 dev->dev_private = (void *) dev_priv;
diff --git a/linux-core/i830_dma.c b/linux-core/i830_dma.c
index af9c1cb7..48b64d7c 100644
--- a/linux-core/i830_dma.c
+++ b/linux-core/i830_dma.c
@@ -253,7 +253,7 @@ int i830_dma_cleanup(drm_device_t *dev)
 
         if (dev_priv->ring.virtual_start) {
                 DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
-                                 dev_priv->ring.Size);
+                                 dev_priv->ring.Size, dev);
         }
         if (dev_priv->hw_status_page) {
                 pci_free_consistent(dev->pdev, PAGE_SIZE,
@@ -271,7 +271,7 @@ int i830_dma_cleanup(drm_device_t *dev)
                         drm_buf_t *buf = dma->buflist[ i ];
                         drm_i830_buf_priv_t *buf_priv = buf->dev_private;
                         if ( buf_priv->kernel_virtual && buf->total )
-                                DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total);
+                                DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
                 }
         }
         return 0;
@@ -347,7 +347,7 @@ static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
                 *buf_priv->in_use = I830_BUF_FREE;
 
                 buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
-                                                        buf->total);
+                                                        buf->total, dev);
         }
         return 0;
 }
@@ -401,7 +401,7 @@ static int i830_dma_initialize(drm_device_t *dev,
 
         dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
                                                     init->ring_start,
-                                                    init->ring_size);
+                                                    init->ring_size, dev);
 
         if (dev_priv->ring.virtual_start == NULL) {
                 dev->dev_private = (void *) dev_priv;