path: root/linux-core
Age         Commit message  (Author)
2007-01-08  drm: remove drm_follow_page, and drm_ioremap and ioremapfree  (Christoph Hellwig)
2007-01-08  fixup i810/i830 to use drm_core_ioremap instead of drm_ioremap  (Dave Airlie)
2007-01-05  nouveau: Add an mtrr over the whole FB  (Stephane Marchesin)
2007-01-02  linux-core: Make git ignore generated module symbol version files.  (Michel Dänzer)
2007-01-02  nouveau: Add nv40-specific PGRAPH code, not hooked up yet.  (Ben Skeggs)
2007-01-01  make build against 2.6.20 hopefully  (Dave Airlie)
2007-01-01  fixup permission along line of kernel  (Dave Airlie)
2006-12-27  Proper allocation of AGP pages for ttms.  (Thomas Hellstrom)
2006-12-21  Bug #9120.  (Thomas Hellstrom)
2006-12-21  Improve memory manager accounting printout formatting.  (Thomas Hellstrom)
2006-12-21  Fix buggy aligned allocations.  (Thomas Hellstrom)
2006-12-20  Remove the stupid root_node field from the core memory manager.  (Thomas Hellstrom)
2006-12-20  Replace vmalloc_32.  (Thomas Hellstrom)
2006-12-20  Some via PCI posting flushes.  (Thomas Hellstrom)
2006-12-20  Merge branch 'nouveau-1'  (Dave Airlie)
2006-12-20  fixup symlinks via Makefile  (Dave Airlie)
2006-12-20  add nouveau symlinks via git  (Dave Airlie)
2006-12-19  Security fix. Zero pages before they are handed to user space.  (Thomas Hellstrom)
2006-12-19  Security fix. Zero pages before they are handed to user space.  (Thomas Hellstrom)
2006-12-19  Reclaim buffers locked fixup.  (Thomas Hellstrom)
2006-12-19  add kcalloc compat for before 2.6.10  (Dave Airlie)
2006-12-19  remove do munmap 4 args  (Dave Airlie)
2006-12-19  fixup inclusion of agp.h  (Dave Airlie)
2006-12-19  remove drm pci from 2.5 days  (Dave Airlie)
2006-12-19  remove legacy taskqueue code  (Dave Airlie)
2006-12-19  drm: remove all 2.4 support for drm development tree.  (Dave Airlie)
2006-12-19  [SPARC]: Respect vm_page_prot in io_remap_page_range().  (Dave Airlie)
2006-12-19  [PATCH] mm: incorrect VM_FAULT_OOM returns from drivers  (Dave Airlie)
2006-12-19  fix irq args compatiblity with pre 2.6.19  (Dave Airlie)
2006-12-19  make sizeof match the copy struct  (Dave Airlie)
2006-12-19  use spin_lock_init in via dmablit  (Dave Airlie)
2006-12-19  Revert "drm: ioremap balanced with iounmap for drivers/char/drm"  (Dave Airlie)
2006-12-19  drm: ioremap balanced with iounmap for drivers/char/drm  (Dave Airlie)
2006-12-16  drm/linux-core: drmP.h compilation fix  (Michael Buesch)
2006-12-15  Remove the memory caches for fence objects and memory manager nodes,  (Thomas Hellstrom)
2006-12-01  Unshare drm_drawable.c again for now.  (Michel Dänzer)
2006-12-01  Track linux-core symlinks in git.  (Michel Dänzer)
2006-11-06  drm: fixup page alignment on SAREA map on ppc64  (Dave Airlie)
2006-11-06  Merge branch 'master' into nouveau-1  (Dave Airlie)
2006-11-05  nouveau: add compat ioc32 support  (Dave Airlie)
2006-11-05  remove config.h  (Dave Airlie)
2006-10-30  Bugzilla Bug #8819  (Thomas Hellstrom)
2006-10-27  Last minute changes to support multi-page size buffer offset alignments.  (Thomas Hellstrom)
2006-10-26  New mm function names. Update header.  (Thomas Hellstrom)
2006-10-26  Add improved alignment functionality to the core memory manager.  (Thomas Hellstrom)
2006-10-26  Add a one-page hole in the file offset space between buffers.  (Thomas Hellstrom)
2006-10-21  Merge branch 'master' of git+ssh://git.freedesktop.org/git/mesa/drm  (Thomas Hellstrom)
2006-10-21  The CPU cache must be flushed _before_ we start modifying the kernel map ptes,  (Thomas Hellstrom)
2006-10-20  Bug #1746: Set dev_priv_size for the MGA driver.  (Tilman Sauerbeck)
2006-10-20  We apparently need this global cache flush anyway.  (Thomas Hellstrom)
s="hl ppc"> /* Very simple allocator for GART memory, working on a static range * already mapped into each client's address space. */ static struct mem_block *split_block(struct mem_block *p, int start, int size, DRMFILE filp) { /* Maybe cut off the start of an existing block */ if (start > p->start) { struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); if (!newblock) goto out; newblock->start = start; newblock->size = p->size - (start - p->start); newblock->filp = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; p->next = newblock; p->size -= newblock->size; p = newblock; } /* Maybe cut off the end of an existing block */ if (size < p->size) { struct mem_block *newblock = drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); if (!newblock) goto out; newblock->start = start + size; newblock->size = p->size - size; newblock->filp = NULL; newblock->next = p->next; newblock->prev = p; p->next->prev = newblock; p->next = newblock; p->size = size; } out: /* Our block is in the middle */ p->filp = filp; return p; } static struct mem_block *alloc_block(struct mem_block *heap, int size, int align2, DRMFILE filp) { struct mem_block *p; int mask = (1 << align2) - 1; list_for_each(p, heap) { int start = (p->start + mask) & ~mask; if (p->filp == 0 && start + size <= p->start + p->size) return split_block(p, start, size, filp); } return NULL; } static struct mem_block *find_block(struct mem_block *heap, int start) { struct mem_block *p; list_for_each(p, heap) if (p->start == start) return p; return NULL; } static void free_block(struct mem_block *p) { p->filp = NULL; /* Assumes a single contiguous range. Needs a special filp in * 'heap' to stop it being subsumed. */ if (p->next->filp == 0) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; p->next->prev = p; drm_free(q, sizeof(*q), DRM_MEM_BUFS); } if (p->prev->filp == 0) { struct mem_block *q = p->prev; q->size += p->size; q->next = p->next; q->next->prev = q; drm_free(p, sizeof(*q), DRM_MEM_BUFS); } } /* Initialize. How to check for an uninitialized heap? */ static int init_heap(struct mem_block **heap, int start, int size) { struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); if (!blocks) return DRM_ERR(ENOMEM); *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); if (!*heap) { drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); return DRM_ERR(ENOMEM); } blocks->start = start; blocks->size = size; blocks->filp = NULL; blocks->next = blocks->prev = *heap; memset(*heap, 0, sizeof(**heap)); (*heap)->filp = (DRMFILE) - 1; (*heap)->next = (*heap)->prev = blocks; return 0; } /* Free all blocks associated with the releasing file. */ void radeon_mem_release(DRMFILE filp, struct mem_block *heap) { struct mem_block *p; if (!heap || !heap->next) return; list_for_each(p, heap) { if (p->filp == filp) p->filp = NULL; } /* Assumes a single contiguous range. Needs a special filp in * 'heap' to stop it being subsumed. */ list_for_each(p, heap) { while (p->filp == 0 && p->next->filp == 0) { struct mem_block *q = p->next; p->size += q->size; p->next = q->next; p->next->prev = p; drm_free(q, sizeof(*q), DRM_MEM_DRIVER); } } } /* Shutdown. 
*/ void radeon_mem_takedown(struct mem_block **heap) { struct mem_block *p; if (!*heap) return; for (p = (*heap)->next; p != *heap;) { struct mem_block *q = p; p = p->next; drm_free(q, sizeof(*q), DRM_MEM_DRIVER); } drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); *heap = NULL; } /* IOCTL HANDLERS */ static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region) { switch (region) { case RADEON_MEM_REGION_GART: return &dev_priv->gart_heap; case RADEON_MEM_REGION_FB: return &dev_priv->fb_heap; default: return NULL; } } int radeon_mem_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_mem_alloc_t alloc; struct mem_block *block, **heap; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data, sizeof(alloc)); heap = get_heap(dev_priv, alloc.region); if (!heap || !*heap) return DRM_ERR(EFAULT); /* Make things easier on ourselves: all allocations at least * 4k aligned. */ if (alloc.alignment < 12) alloc.alignment = 12; block = alloc_block(*heap, alloc.size, alloc.alignment, filp); if (!block) return DRM_ERR(ENOMEM); if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) { DRM_ERROR("copy_to_user\n"); return DRM_ERR(EFAULT); } return 0; } int radeon_mem_free(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_mem_free_t memfree; struct mem_block *block, **heap; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data, sizeof(memfree)); heap = get_heap(dev_priv, memfree.region); if (!heap || !*heap) return DRM_ERR(EFAULT); block = find_block(*heap, memfree.region_offset); if (!block) return DRM_ERR(EFAULT); if (block->filp != filp) return DRM_ERR(EPERM); free_block(block); return 0; } int radeon_mem_init_heap(DRM_IOCTL_ARGS) { DRM_DEVICE; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_mem_init_heap_t initheap; struct mem_block **heap; if (!dev_priv) { DRM_ERROR("%s called with no initialization\n", __FUNCTION__); return DRM_ERR(EINVAL); } DRM_COPY_FROM_USER_IOCTL(initheap, (drm_radeon_mem_init_heap_t __user *) data, sizeof(initheap)); heap = get_heap(dev_priv, initheap.region); if (!heap) return DRM_ERR(EFAULT); if (*heap) { DRM_ERROR("heap already initialized?"); return DRM_ERR(EFAULT); } return init_heap(heap, initheap.start, initheap.size); }
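
For reference, a minimal client-side sketch of driving the alloc/free handlers above through libdrm follows. It is illustrative only: the drmCommandWriteRead()/drmCommandWrite() wrappers, the DRM_RADEON_ALLOC and DRM_RADEON_FREE command indices, and the exact drm_radeon_mem_alloc_t/drm_radeon_mem_free_t layouts are assumed to come from xf86drm.h and radeon_drm.h rather than being defined in this file.

/* Hypothetical userspace sketch; command indices and struct layouts are
 * assumed from radeon_drm.h / xf86drm.h, not taken from this file.
 */
#include <string.h>
#include <xf86drm.h>
#include "radeon_drm.h"

/* Ask the kernel for 'size' bytes from the GART heap; the resulting start
 * offset within the heap range is written back through *offset.
 */
static int radeon_alloc_gart_block(int fd, int size, int *offset)
{
        drm_radeon_mem_alloc_t alloc;

        memset(&alloc, 0, sizeof(alloc));
        alloc.region = RADEON_MEM_REGION_GART;
        alloc.alignment = 12;          /* log2 bytes; the kernel enforces at least 4k */
        alloc.size = size;
        alloc.region_offset = offset;  /* kernel copies block->start here */

        return drmCommandWriteRead(fd, DRM_RADEON_ALLOC, &alloc, sizeof(alloc));
}

/* Return a previously allocated block; only the owning file may free it. */
static int radeon_free_gart_block(int fd, int offset)
{
        drm_radeon_mem_free_t memfree;

        memset(&memfree, 0, sizeof(memfree));
        memfree.region = RADEON_MEM_REGION_GART;
        memfree.region_offset = offset;

        return drmCommandWrite(fd, DRM_RADEON_FREE, &memfree, sizeof(memfree));
}

The offset handed back is block->start, i.e. a position inside the range previously registered through the radeon_mem_init_heap() ioctl; the alignment field is the log2 of the requested alignment in bytes, and radeon_mem_alloc() raises anything below 12 (4 KiB) to 12.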