summary | refs | log | tree | commit | diff
path: root/linux-core/xgi_pcie.c
diff options
context:
space:
mode:
authorIan Romanick <idr@us.ibm.com>2007-07-19 10:29:18 -0700
committerIan Romanick <idr@us.ibm.com>2007-07-19 10:29:18 -0700
commit5ba94c2ab8be350fee495e5cfe94afb8f663956a (patch)
tree609018b8c0060e00bd34a6bad37d6926f8de60cb /linux-core/xgi_pcie.c
parent8d60bf2f199d57ec45feaab836b31832b9bbabb9 (diff)
Initial pass at converting driver to DRM infrastructure.
Diffstat (limited to 'linux-core/xgi_pcie.c')
-rw-r--r--  linux-core/xgi_pcie.c  941
1 file changed, 170 insertions, 771 deletions
diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c
index cfc9febc..49c531fc 100644
--- a/linux-core/xgi_pcie.c
+++ b/linux-core/xgi_pcie.c
@@ -26,176 +26,81 @@
* DEALINGS IN THE SOFTWARE.
***************************************************************************/
-#include "xgi_linux.h"
#include "xgi_drv.h"
#include "xgi_regs.h"
-#include "xgi_pcie.h"
#include "xgi_misc.h"
-static struct xgi_pcie_heap *xgi_pcie_heap = NULL;
-static struct kmem_cache *xgi_pcie_cache_block = NULL;
-static struct xgi_pcie_block *xgi_pcie_vertex_block = NULL;
-static struct xgi_pcie_block *xgi_pcie_cmdlist_block = NULL;
-static struct xgi_pcie_block *xgi_pcie_scratchpad_block = NULL;
-extern struct list_head xgi_mempid_list;
-
-static unsigned long xgi_pcie_lut_alloc(unsigned long page_order)
-{
- struct page *page;
- unsigned long page_addr = 0;
- unsigned long page_count = 0;
- int i;
-
- page_count = (1 << page_order);
- page_addr = __get_free_pages(GFP_KERNEL, page_order);
-
- if (page_addr == 0UL) {
- XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n",
- page_count);
- return 0;
- }
-
- page = virt_to_page(page_addr);
-
- for (i = 0; i < page_count; i++, page++) {
- XGI_INC_PAGE_COUNT(page);
- XGILockPage(page);
- }
-
- XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n",
- page_count, page_order, page_addr);
- return page_addr;
-}
-
-static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order)
-{
- struct page *page;
- unsigned long page_count = 0;
- int i;
-
- page_count = (1 << page_order);
- page = virt_to_page(page_addr);
-
- for (i = 0; i < page_count; i++, page++) {
- XGI_DEC_PAGE_COUNT(page);
- XGIUnlockPage(page);
- }
-
- free_pages(page_addr, page_order);
-}
+static struct xgi_mem_block *xgi_pcie_vertex_block = NULL;
+static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL;
+static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL;
static int xgi_pcie_lut_init(struct xgi_info * info)
{
- unsigned char *page_addr = NULL;
- unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder;
- unsigned long count = 0;
u8 temp = 0;
+ int err;
+ unsigned i;
+ struct drm_scatter_gather request;
+ struct drm_sg_mem *sg;
+ u32 *lut;
- /* Jong 06/06/2006 */
- unsigned long pcie_aperture_size;
-
- info->pcie.size = 128 * 1024 * 1024;
/* Get current FB aperture size */
- temp = In3x5(0x27);
- XGI_INFO("In3x5(0x27): 0x%x \n", temp);
+ temp = IN3X5B(info->mmio_map, 0x27);
+ DRM_INFO("In3x5(0x27): 0x%x \n", temp);
if (temp & 0x01) { /* 256MB; Jong 06/05/2006; 0x10000000 */
- /* Jong 06/06/2006; allocate memory */
- pcie_aperture_size = 256 * 1024 * 1024;
- /* info->pcie.base = 256 * 1024 * 1024; *//* pcie base is different from fb base */
+ info->pcie.base = 256 * 1024 * 1024;
} else { /* 128MB; Jong 06/05/2006; 0x08000000 */
-
- /* Jong 06/06/2006; allocate memory */
- pcie_aperture_size = 128 * 1024 * 1024;
- /* info->pcie.base = 128 * 1024 * 1024; */
+ info->pcie.base = 128 * 1024 * 1024;
}
- /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */
- /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */
- /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */
- /* info->pcie.base=ioremap(0x0F000000, 0x10000000); *//* Cause system hang */
- info->pcie.base = pcie_aperture_size; /* works */
- /* info->pcie.base=info->fb.base + info->fb.size; *//* System hang */
- /* info->pcie.base=128 * 1024 * 1024; *//* System hang */
- XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base);
+ DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base);
/* Get current lookup table page size */
- temp = bReadReg(0xB00C);
+ temp = DRM_READ8(info->mmio_map, 0xB00C);
if (temp & 0x04) { /* 8KB */
info->lutPageSize = 8 * 1024;
} else { /* 4KB */
-
info->lutPageSize = 4 * 1024;
}
- XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
+ DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize);
-#if 0
- /* Get current lookup table location */
- temp = bReadReg(0xB00C);
- if (temp & 0x02) { /* LFB */
- info->isLUTInLFB = TRUE;
- /* Current we only support lookup table in LFB */
- temp &= 0xFD;
- bWriteReg(0xB00C, temp);
- info->isLUTInLFB = FALSE;
- } else { /* SFB */
- info->isLUTInLFB = FALSE;
+ request.size = info->pcie.size;
+ err = drm_sg_alloc(info->dev, & request);
+ if (err) {
+ DRM_ERROR("cannot allocate PCIE GART backing store! "
+ "size = %d\n", info->pcie.size);
+ return err;
}
- XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
+ sg = info->dev->sg;
- /* Get current SDFB page size */
- temp = bReadReg(0xB00C);
- if (temp & 0x08) { /* 8MB */
- info->sdfbPageSize = 8 * 1024 * 1024;
- } else { /* 4MB */
-
- info->sdfbPageSize = 4 * 1024 * 1024;
+ info->lut_handle = drm_pci_alloc(info->dev,
+ sizeof(u32) * sg->pages,
+ PAGE_SIZE,
+ DMA_31BIT_MASK);
+ if (info->lut_handle == NULL) {
+ DRM_ERROR("cannot allocate PCIE lut page!\n");
+ return DRM_ERR(ENOMEM);
}
-#endif
- pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE;
- /*
- * Allocate memory for PCIE GART table;
- */
- lutEntryNum = pciePageCount;
- lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE;
-
- /* get page_order base on page_count */
- count = lutPageCount;
- for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder) ;
-
- if ((lutPageCount << 1) == (1 << lutPageOrder)) {
- lutPageOrder -= 1;
- }
-
- XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n",
- lutEntryNum, lutPageCount, lutPageOrder);
-
- info->lutPageOrder = lutPageOrder;
- page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder);
+ lut = info->lut_handle->vaddr;
+ for (i = 0; i < sg->pages; i++) {
+ info->dev->sg->busaddr[i] = pci_map_page(info->dev->pdev,
+ sg->pagelist[i],
+ 0,
+ PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(info->dev->sg->busaddr[i])) {
+ DRM_ERROR("cannot map GART backing store for DMA!\n");
+ return DRM_ERR(-(info->dev->sg->busaddr[i]));
+ }
- if (!page_addr) {
- XGI_ERROR("cannot allocate PCIE lut page!\n");
- goto fail;
+ lut[i] = info->dev->sg->busaddr[i];
}
- info->lut_base = (unsigned long *)page_addr;
-
- XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n",
- page_addr, virt_to_phys(page_addr));
-
- XGI_INFO
- ("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n",
- info->lut_base, __pa(info->lut_base), info->lutPageOrder);
-
- /*
- * clean all PCIE GART Entry
- */
- memset(page_addr, 0, PAGE_SIZE << lutPageOrder);
#if defined(__i386__) || defined(__x86_64__)
asm volatile ("wbinvd":::"memory");
@@ -204,675 +109,186 @@ static int xgi_pcie_lut_init(struct xgi_info * info)
#endif
/* Set GART in SFB */
- bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02);
+ temp = DRM_READ8(info->mmio_map, 0xB00C);
+ DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02);
+
/* Set GART base address to HW */
- dwWriteReg(0xB034, __pa(info->lut_base));
+ dwWriteReg(info->mmio_map, 0xB034, info->lut_handle->busaddr);
- return 1;
- fail:
return 0;
}
-static void xgi_pcie_lut_cleanup(struct xgi_info * info)
-{
- if (info->lut_base) {
- XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n",
- info->lut_base, info->lutPageOrder);
- xgi_pcie_lut_free((unsigned long)info->lut_base,
- info->lutPageOrder);
- info->lut_base = NULL;
- }
-}
-
-static struct xgi_pcie_block *xgi_pcie_new_node(void)
+void xgi_pcie_lut_cleanup(struct xgi_info * info)
{
- struct xgi_pcie_block *block =
- (struct xgi_pcie_block *) kmem_cache_alloc(xgi_pcie_cache_block,
- GFP_KERNEL);
- if (block == NULL) {
- return NULL;
+ if (info->dev->sg) {
+ drm_sg_free(info->dev, info->dev->sg->handle);
}
- block->offset = 0; /* block's offset in pcie memory, begin from 0 */
- block->size = 0; /* The block size. */
- block->bus_addr = 0; /* CPU access address/bus address */
- block->hw_addr = 0; /* GE access address */
- block->page_count = 0;
- block->page_order = 0;
- block->page_block = NULL;
- block->page_table = NULL;
- block->owner = PCIE_INVALID;
-
- return block;
-}
-
-static void xgi_pcie_block_stuff_free(struct xgi_pcie_block * block)
-{
- struct page *page;
- struct xgi_page_block *page_block = block->page_block;
- struct xgi_page_block *free_block;
- unsigned long page_count = 0;
- int i;
-
- //XGI_INFO("block->page_block: 0x%p \n", block->page_block);
- while (page_block) {
- page_count = page_block->page_count;
-
- page = virt_to_page(page_block->virt_addr);
- for (i = 0; i < page_count; i++, page++) {
- XGI_DEC_PAGE_COUNT(page);
- XGIUnlockPage(page);
- }
- free_pages(page_block->virt_addr, page_block->page_order);
-
- page_block->phys_addr = 0;
- page_block->virt_addr = 0;
- page_block->page_count = 0;
- page_block->page_order = 0;
-
- free_block = page_block;
- page_block = page_block->next;
- //XGI_INFO("free free_block: 0x%p \n", free_block);
- kfree(free_block);
- free_block = NULL;
- }
-
- if (block->page_table) {
- //XGI_INFO("free block->page_table: 0x%p \n", block->page_table);
- kfree(block->page_table);
- block->page_table = NULL;
+ if (info->lut_handle) {
+ drm_pci_free(info->dev, info->lut_handle);
+ info->lut_handle = NULL;
}
}
int xgi_pcie_heap_init(struct xgi_info * info)
{
- struct xgi_pcie_block *block;
-
- if (!xgi_pcie_lut_init(info)) {
- XGI_ERROR("xgi_pcie_lut_init failed\n");
- return 0;
- }
-
- xgi_pcie_heap =
- (struct xgi_pcie_heap *) kmalloc(sizeof(struct xgi_pcie_heap), GFP_KERNEL);
- if (!xgi_pcie_heap) {
- XGI_ERROR("xgi_pcie_heap alloc failed\n");
- goto fail1;
- }
- INIT_LIST_HEAD(&xgi_pcie_heap->free_list);
- INIT_LIST_HEAD(&xgi_pcie_heap->used_list);
- INIT_LIST_HEAD(&xgi_pcie_heap->sort_list);
-
- xgi_pcie_heap->max_freesize = info->pcie.size;
-
- xgi_pcie_cache_block =
- kmem_cache_create("xgi_pcie_block", sizeof(struct xgi_pcie_block), 0,
- SLAB_HWCACHE_ALIGN, NULL, NULL);
+ int err;
- if (NULL == xgi_pcie_cache_block) {
- XGI_ERROR("Fail to creat xgi_pcie_block\n");
- goto fail2;
+ err = xgi_pcie_lut_init(info);
+ if (err) {
+ DRM_ERROR("xgi_pcie_lut_init failed\n");
+ return err;
}
- block = (struct xgi_pcie_block *) xgi_pcie_new_node();
- if (!block) {
- XGI_ERROR("xgi_pcie_new_node failed\n");
- goto fail3;
- }
-
- block->offset = 0; /* block's offset in pcie memory, begin from 0 */
- block->size = info->pcie.size;
-
- list_add(&block->list, &xgi_pcie_heap->free_list);
- XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n",
- block->offset, block->size);
- return 1;
- fail3:
- if (xgi_pcie_cache_block) {
- kmem_cache_destroy(xgi_pcie_cache_block);
- xgi_pcie_cache_block = NULL;
+ err = xgi_mem_heap_init(&info->pcie_heap, 0, info->pcie.size);
+ if (err) {
+ xgi_pcie_lut_cleanup(info);
}
- fail2:
- if (xgi_pcie_heap) {
- kfree(xgi_pcie_heap);
- xgi_pcie_heap = NULL;
- }
- fail1:
- xgi_pcie_lut_cleanup(info);
- return 0;
+ return err;
}
-void xgi_pcie_heap_cleanup(struct xgi_info * info)
-{
- struct list_head *free_list;
- struct xgi_pcie_block *block;
- struct xgi_pcie_block *next;
- int j;
-
- xgi_pcie_lut_cleanup(info);
- XGI_INFO("xgi_pcie_lut_cleanup scceeded\n");
-
- if (xgi_pcie_heap) {
- free_list = &xgi_pcie_heap->free_list;
- for (j = 0; j < 3; j++, free_list++) {
- list_for_each_entry_safe(block, next, free_list, list) {
- XGI_INFO
- ("No. %d block offset: 0x%lx size: 0x%lx\n",
- j, block->offset, block->size);
- xgi_pcie_block_stuff_free(block);
- block->bus_addr = 0;
- block->hw_addr = 0;
-
- //XGI_INFO("No. %d free block: 0x%p \n", j, block);
- kmem_cache_free(xgi_pcie_cache_block, block);
- }
- }
-
- XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap);
- kfree(xgi_pcie_heap);
- xgi_pcie_heap = NULL;
- }
- if (xgi_pcie_cache_block) {
- kmem_cache_destroy(xgi_pcie_cache_block);
- xgi_pcie_cache_block = NULL;
- }
-}
-
-static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info,
- unsigned long originalSize,
- enum PcieOwner owner)
+int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+ DRMFILE filp)
{
- struct xgi_pcie_block *block, *used_block, *free_block;
- struct xgi_page_block *page_block, *prev_page_block;
- struct page *page;
- unsigned long page_order = 0, count = 0, index = 0;
- unsigned long page_addr = 0;
- u32 *lut_addr = NULL;
- unsigned long lut_id = 0;
- unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
- int i, j, page_count = 0;
- int temp = 0;
-
- XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n");
- XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
- originalSize, size);
-
- if (owner == PCIE_3D) {
- if (xgi_pcie_vertex_block) {
- XGI_INFO
- ("PCIE Vertex has been created, return directly.\n");
- return xgi_pcie_vertex_block;
- }
- }
+ struct xgi_mem_block *block;
- if (owner == PCIE_3D_CMDLIST) {
- if (xgi_pcie_cmdlist_block) {
- XGI_INFO
- ("PCIE Cmdlist has been created, return directly.\n");
- return xgi_pcie_cmdlist_block;
- }
+ down(&info->pcie_sem);
+ if ((alloc->owner == PCIE_3D) && (xgi_pcie_vertex_block)) {
+ DRM_INFO("PCIE Vertex has been created, return directly.\n");
+ block = xgi_pcie_vertex_block;
}
-
- if (owner == PCIE_3D_SCRATCHPAD) {
- if (xgi_pcie_scratchpad_block) {
- XGI_INFO
- ("PCIE Scratchpad has been created, return directly.\n");
- return xgi_pcie_scratchpad_block;
- }
- }
-
- if (size == 0) {
- XGI_ERROR("size == 0 \n");
- return (NULL);
+ else if ((alloc->owner == PCIE_3D_CMDLIST) && (xgi_pcie_cmdlist_block)) {
+ DRM_INFO("PCIE Cmdlist has been created, return directly.\n");
+ block = xgi_pcie_cmdlist_block;
}
-
- XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize);
- if (size > xgi_pcie_heap->max_freesize) {
- XGI_ERROR
- ("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n",
- size, xgi_pcie_heap->max_freesize);
- return (NULL);
+ else if ((alloc->owner == PCIE_3D_SCRATCHPAD) && (xgi_pcie_scratchpad_block)) {
+ DRM_INFO("PCIE Scratchpad has been created, return directly.\n");
+ block = xgi_pcie_scratchpad_block;
}
+ else {
+ block = xgi_mem_alloc(&info->pcie_heap, alloc->size, alloc->owner);
- /* Jong 05/30/2006; find next free list which has enough space */
- list_for_each_entry(block, &xgi_pcie_heap->free_list, list) {
- if (size <= block->size) {
- break;
+ if (alloc->owner == PCIE_3D) {
+ xgi_pcie_vertex_block = block;
+ }
+ else if (alloc->owner == PCIE_3D_CMDLIST) {
+ xgi_pcie_cmdlist_block = block;
+ }
+ else if (alloc->owner == PCIE_3D_SCRATCHPAD) {
+ xgi_pcie_scratchpad_block = block;
}
}
+ up(&info->pcie_sem);
- if (&block->list == &xgi_pcie_heap->free_list) {
- XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n",
- size / 1024);
- return (NULL);
- }
-
- free_block = block;
- XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
- size, free_block->offset, free_block->size);
-
- if (size == free_block->size) {
- used_block = free_block;
- XGI_INFO("size==free_block->size: free_block = 0x%p\n",
- free_block);
- list_del(&free_block->list);
+ if (block == NULL) {
+ alloc->location = XGI_MEMLOC_INVALID;
+ alloc->size = 0;
+ DRM_ERROR("PCIE RAM allocation failed\n");
+ return DRM_ERR(ENOMEM);
} else {
- used_block = xgi_pcie_new_node();
- if (used_block == NULL) {
- return NULL;
- }
-
- if (used_block == free_block) {
- XGI_ERROR("used_block == free_block = 0x%p\n",
- used_block);
- }
-
- used_block->offset = free_block->offset;
- used_block->size = size;
+ DRM_INFO("PCIE RAM allocation succeeded: offset = 0x%lx\n",
+ block->offset);
+ alloc->location = XGI_MEMLOC_NON_LOCAL;
+ alloc->size = block->size;
+ alloc->hw_addr = block->offset + info->pcie.base;
+ alloc->offset = block->offset;
- free_block->offset += size;
- free_block->size -= size;
+ block->filp = filp;
+ return 0;
}
+}
- xgi_pcie_heap->max_freesize -= size;
- used_block->bus_addr = info->pcie.base + used_block->offset;
- used_block->hw_addr = info->pcie.base + used_block->offset;
- used_block->page_count = page_count = size / PAGE_SIZE;
+int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct xgi_mem_alloc alloc;
+ struct xgi_info *info = dev->dev_private;
+ int err;
- /* get page_order base on page_count */
- for (used_block->page_order = 0; page_count; page_count >>= 1) {
- ++used_block->page_order;
- }
+ DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data,
+ sizeof(alloc));
- if ((used_block->page_count << 1) == (1 << used_block->page_order)) {
- used_block->page_order--;
- }
- XGI_INFO
- ("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n",
- used_block->offset, used_block->size, used_block->bus_addr,
- used_block->hw_addr, used_block->page_count,
- used_block->page_order);
-
- used_block->page_block = NULL;
- //used_block->page_block = (struct xgi_pages_block *)kmalloc(sizeof(struct xgi_pages_block), GFP_KERNEL);
- //if (!used_block->page_block) return NULL;_t
- //used_block->page_block->next = NULL;
-
- used_block->page_table =
- (struct xgi_pte *) kmalloc(sizeof(struct xgi_pte) * used_block->page_count,
- GFP_KERNEL);
- if (used_block->page_table == NULL) {
- goto fail;
+ err = xgi_pcie_alloc(info, & alloc, filp);
+ if (err) {
+ return err;
}
+
+ DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data,
+ alloc, sizeof(alloc));
- lut_id = (used_block->offset >> PAGE_SHIFT);
- lut_addr = info->lut_base;
- lut_addr += lut_id;
- XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id);
-
- /* alloc free pages from system */
- page_count = used_block->page_count;
- page_block = used_block->page_block;
- prev_page_block = used_block->page_block;
- for (i = 0; page_count > 0; i++) {
- /* if size is bigger than 2M bytes, it should be split */
- if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) {
- page_order = XGI_PCIE_ALLOC_MAX_ORDER;
- } else {
- count = page_count;
- for (page_order = 0; count; count >>= 1, ++page_order) ;
-
- if ((page_count << 1) == (1 << page_order)) {
- page_order -= 1;
- }
- }
+ return 0;
+}
- count = (1 << page_order);
- page_addr = __get_free_pages(GFP_KERNEL, page_order);
- XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n",
- page_addr);
- if (!page_addr) {
- XGI_ERROR
- ("No: %d :Can't get free pages: 0x%lx from system memory !\n",
- i, count);
- goto fail;
- }
+/**
+ * Free all blocks associated with a particular file handle.
+ */
+void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp)
+{
+ if (!info->pcie_heap.initialized) {
+ return;
+ }
- /* Jong 05/30/2006; test */
- memset((unsigned char *)page_addr, 0xFF,
- PAGE_SIZE << page_order);
- /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */
-
- if (page_block == NULL) {
- page_block =
- (struct xgi_page_block *)
- kmalloc(sizeof(struct xgi_page_block), GFP_KERNEL);
- if (!page_block) {
- XGI_ERROR
- ("Can't get memory for page_block! \n");
- goto fail;
- }
- }
+ down(&info->pcie_sem);
- if (prev_page_block == NULL) {
- used_block->page_block = page_block;
- prev_page_block = page_block;
- } else {
- prev_page_block->next = page_block;
- prev_page_block = page_block;
- }
+ do {
+ struct xgi_mem_block *block;
- page_block->next = NULL;
- page_block->phys_addr = __pa(page_addr);
- page_block->virt_addr = page_addr;
- page_block->page_count = count;
- page_block->page_order = page_order;
-
- XGI_INFO
- ("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n",
- page_block->phys_addr);
- XGI_INFO
- ("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n",
- page_block->virt_addr);
-
- page = virt_to_page(page_addr);
-
- //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p"
- // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n",
- // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr);
-
- for (j = 0; j < count; j++, page++, lut_addr++) {
- used_block->page_table[index + j].phys_addr =
- __pa(page_address(page));
- used_block->page_table[index + j].virt_addr =
- (unsigned long)page_address(page);
-
- XGI_INFO
- ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n",
- used_block->page_table[index + j].phys_addr);
- XGI_INFO
- ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n",
- used_block->page_table[index + j].virt_addr);
-
- *lut_addr = __pa(page_address(page));
- XGI_INC_PAGE_COUNT(page);
- XGILockPage(page);
-
- if (temp) {
- XGI_INFO
- ("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n",
- __pa(page_address(page)), lut_addr, j,
- *lut_addr);
- temp--;
+ list_for_each_entry(block, &info->pcie_heap.used_list, list) {
+ if (block->filp == filp) {
+ break;
}
}
- page_block = page_block->next;
- page_count -= count;
- index += count;
- temp = 0;
- }
-
- used_block->owner = owner;
- list_add(&used_block->list, &xgi_pcie_heap->used_list);
-
-#if defined(__i386__) || defined(__x86_64__)
- asm volatile ("wbinvd":::"memory");
-#else
- mb();
-#endif
-
- /* Flush GART Table */
- bWriteReg(0xB03F, 0x40);
- bWriteReg(0xB03F, 0x00);
-
- if (owner == PCIE_3D) {
- xgi_pcie_vertex_block = used_block;
- }
-
- if (owner == PCIE_3D_CMDLIST) {
- xgi_pcie_cmdlist_block = used_block;
- }
-
- if (owner == PCIE_3D_SCRATCHPAD) {
- xgi_pcie_scratchpad_block = used_block;
- }
-
- XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n");
- return (used_block);
-
- fail:
- xgi_pcie_block_stuff_free(used_block);
- kmem_cache_free(xgi_pcie_cache_block, used_block);
- return NULL;
-}
-
-static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info,
- unsigned long offset)
-{
- struct xgi_pcie_block *used_block, *block;
- struct xgi_pcie_block *prev, *next;
- unsigned long upper, lower;
-
- list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
- if (block->offset == offset) {
+ if (&block->list == &info->pcie_heap.used_list) {
break;
}
- }
-
- if (&block->list == &xgi_pcie_heap->used_list) {
- XGI_ERROR("can't find block: 0x%lx to free!\n", offset);
- return (NULL);
- }
-
- used_block = block;
- XGI_INFO
- ("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n",
- used_block, used_block->offset, used_block->size,
- used_block->bus_addr, used_block->hw_addr);
-
- xgi_pcie_block_stuff_free(used_block);
- /* update xgi_pcie_heap */
- xgi_pcie_heap->max_freesize += used_block->size;
+ (void) xgi_pcie_free(info, block->offset, filp);
+ } while(1);
- prev = next = NULL;
- upper = used_block->offset + used_block->size;
- lower = used_block->offset;
-
- list_for_each_entry(block, &xgi_pcie_heap->free_list, list) {
- if (block->offset == upper) {
- next = block;
- } else if ((block->offset + block->size) == lower) {
- prev = block;
- }
- }
-
- XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
- list_del(&used_block->list);
-
- if (prev && next) {
- prev->size += (used_block->size + next->size);
- list_del(&next->list);
- XGI_INFO("free node 0x%p\n", next);
- kmem_cache_free(xgi_pcie_cache_block, next);
- kmem_cache_free(xgi_pcie_cache_block, used_block);
- next = NULL;
- used_block = NULL;
- return (prev);
- }
-
- if (prev) {
- prev->size += used_block->size;
- XGI_INFO("free node 0x%p\n", used_block);
- kmem_cache_free(xgi_pcie_cache_block, used_block);
- used_block = NULL;
- return (prev);
- }
-
- if (next) {
- next->size += used_block->size;
- next->offset = used_block->offset;
- XGI_INFO("free node 0x%p\n", used_block);
- kmem_cache_free(xgi_pcie_cache_block, used_block);
- used_block = NULL;
- return (next);
- }
-
- used_block->bus_addr = 0;
- used_block->hw_addr = 0;
- used_block->page_count = 0;
- used_block->page_order = 0;
- list_add(&used_block->list, &xgi_pcie_heap->free_list);
- XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
- used_block, used_block->offset, used_block->size);
- return (used_block);
+ up(&info->pcie_sem);
}
-void xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
- pid_t pid)
-{
- struct xgi_pcie_block *block;
-
- xgi_down(info->pcie_sem);
- block = xgi_pcie_mem_alloc(info, alloc->size, alloc->owner);
- xgi_up(info->pcie_sem);
-
- if (block == NULL) {
- alloc->location = XGI_MEMLOC_INVALID;
- alloc->size = 0;
- alloc->bus_addr = 0;
- alloc->hw_addr = 0;
- XGI_ERROR("PCIE RAM allocation failed\n");
- } else {
- XGI_INFO
- ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n",
- block->offset, block->bus_addr);
- alloc->location = XGI_MEMLOC_NON_LOCAL;
- alloc->size = block->size;
- alloc->bus_addr = block->bus_addr;
- alloc->hw_addr = block->hw_addr;
-
- /*
- manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE.
- PCIE_3D request means a opengl process created.
- PCIE_3D_TEXTURE request means texture cannot alloc from fb.
- */
- if ((alloc->owner == PCIE_3D)
- || (alloc->owner == PCIE_3D_TEXTURE)) {
- struct xgi_mem_pid *mempid_block =
- kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL);
- if (!mempid_block)
- XGI_ERROR("mempid_block alloc failed\n");
- mempid_block->location = XGI_MEMLOC_NON_LOCAL;
- if (alloc->owner == PCIE_3D)
- mempid_block->bus_addr = 0xFFFFFFFF; /*xgi_pcie_vertex_block has the address */
- else
- mempid_block->bus_addr = alloc->bus_addr;
- mempid_block->pid = pid;
-
- XGI_INFO
- ("Memory ProcessID add one pcie block pid:%ld successfully! \n",
- mempid_block->pid);
- list_add(&mempid_block->list, &xgi_mempid_list);
- }
- }
-}
-void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr)
+int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp)
{
- struct xgi_pcie_block *block;
- unsigned long offset = bus_addr - info->pcie.base;
- struct xgi_mem_pid *mempid_block;
- struct xgi_mem_pid *mempid_freeblock = NULL;
- char isvertex = 0;
- int processcnt;
-
- if (xgi_pcie_vertex_block
- && xgi_pcie_vertex_block->bus_addr == bus_addr)
- isvertex = 1;
-
- if (isvertex) {
- /*check is there any other process using vertex */
- processcnt = 0;
-
- list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
- if (mempid_block->location == XGI_MEMLOC_NON_LOCAL
- && mempid_block->bus_addr == 0xFFFFFFFF) {
- ++processcnt;
- }
- }
- if (processcnt > 1) {
- return;
- }
- }
+ const bool isvertex = (xgi_pcie_vertex_block
+ && (xgi_pcie_vertex_block->offset == offset));
+ int err;
- xgi_down(info->pcie_sem);
- block = xgi_pcie_mem_free(info, offset);
- xgi_up(info->pcie_sem);
+ down(&info->pcie_sem);
+ err = xgi_mem_free(&info->pcie_heap, offset, filp);
+ up(&info->pcie_sem);
- if (block == NULL) {
- XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset);
+ if (err) {
+ DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset);
}
if (isvertex)
xgi_pcie_vertex_block = NULL;
- /* manage mempid */
- list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
- if (mempid_block->location == XGI_MEMLOC_NON_LOCAL
- && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF)
- || (!isvertex && mempid_block->bus_addr == bus_addr))) {
- mempid_freeblock = mempid_block;
- break;
- }
- }
- if (mempid_freeblock) {
- list_del(&mempid_freeblock->list);
- XGI_INFO
- ("Memory ProcessID delete one pcie block pid:%ld successfully! \n",
- mempid_freeblock->pid);
- kfree(mempid_freeblock);
- }
+ return err;
}
-/*
- * given a bus address, fid the pcie mem block
- * uses the bus address as the key.
- */
-struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
- unsigned long address)
-{
- struct xgi_pcie_block *block;
- int i;
-
- list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
- if (block->bus_addr == address) {
- return block;
- }
-
- if (block->page_table) {
- for (i = 0; i < block->page_count; i++) {
- unsigned long offset = block->bus_addr;
- if ((address >= offset)
- && (address < (offset + PAGE_SIZE))) {
- return block;
- }
- }
- }
- }
+int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct xgi_info *info = dev->dev_private;
+ u32 offset;
- XGI_ERROR("could not find map for vm 0x%lx\n", address);
+ DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data,
+ sizeof(offset));
- return NULL;
+ return xgi_pcie_free(info, offset, filp);
}
+
/**
* xgi_find_pcie_virt
* @address: GE HW address
@@ -880,60 +296,43 @@ struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
* Returns CPU virtual address. Assumes the CPU VAddr is continuous in not
* the same block
*/
-void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address)
+void *xgi_find_pcie_virt(struct xgi_info * info, u32 address)
{
- struct xgi_pcie_block *block;
- const unsigned long offset_in_page = address & (PAGE_SIZE - 1);
-
- XGI_INFO("begin (address = 0x%lx, offset_in_page = %lu)\n",
- address, offset_in_page);
-
- list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
- XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n",
- block, block->hw_addr, block->size);
-
- if ((address >= block->hw_addr)
- && (address < (block->hw_addr + block->size))) {
- const unsigned long loc_in_pagetable =
- (address - block->hw_addr) >> PAGE_SHIFT;
- void *const ret =
- (void *)(block->page_table[loc_in_pagetable].
- virt_addr + offset_in_page);
-
- XGI_INFO("PAGE_SHIFT = %d\n", PAGE_SHIFT);
- XGI_INFO("block->page_table[0x%lx].virt_addr = 0x%lx\n",
- loc_in_pagetable,
- block->page_table[loc_in_pagetable].virt_addr);
- XGI_INFO("return 0x%p\n", ret);
-
- return ret;
- }
- }
+ const unsigned long offset = address - info->pcie.base;
- XGI_ERROR("could not find map for vm 0x%lx\n", address);
- return NULL;
+ return ((u8 *) info->dev->sg->virtual) + offset;
}
/*
address -- GE hw address
*/
-void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address)
+int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS)
{
+ DRM_DEVICE;
+ struct xgi_info *info = dev->dev_private;
+ u32 address;
u32 *virtaddr = 0;
- XGI_INFO("input GE HW addr is 0x%x\n", address);
+ DRM_COPY_FROM_USER_IOCTL(address, (unsigned long __user *) data,
+ sizeof(address));
+
+ DRM_INFO("input GE HW addr is 0x%x\n", address);
if (address == 0) {
- return;
+ return DRM_ERR(EFAULT);
}
virtaddr = (u32 *)xgi_find_pcie_virt(info, address);
- XGI_INFO("convert to CPU virt addr 0x%p\n", virtaddr);
+ DRM_INFO("convert to CPU virt addr 0x%p\n", virtaddr);
if (virtaddr != NULL) {
- XGI_INFO("original [virtaddr] = 0x%x\n", *virtaddr);
+ DRM_INFO("original [virtaddr] = 0x%x\n", *virtaddr);
*virtaddr = 0x00f00fff;
- XGI_INFO("modified [virtaddr] = 0x%x\n", *virtaddr);
+ DRM_INFO("modified [virtaddr] = 0x%x\n", *virtaddr);
+ } else {
+ return DRM_ERR(EFAULT);
}
+
+ return 0;
}