From ee8954cb53e4964a5565833b5a937f1cbcb60d44 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sun, 6 May 2007 11:17:30 +1000 Subject: drm/ttm: cleanup mm_ioctl ioctls to be separate ioctls. This is the first bunch of ioctls --- linux-core/drm_bo.c | 160 ++++++++++++++++++++++++++++++++--------------- linux-core/drm_compat.c | 3 +- linux-core/drm_drv.c | 12 +++- linux-core/drm_objects.h | 3 + 4 files changed, 123 insertions(+), 55 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 1c7013b3..f78a6f95 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -2158,11 +2158,48 @@ EXPORT_SYMBOL(drm_bo_driver_init); int drm_mm_init_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; + struct drm_mm_init_arg arg; + drm_buffer_manager_t *bm = &dev->bm; + drm_bo_driver_t *driver = dev->driver->bo_driver; + int ret; - int ret = 0; - drm_mm_init_arg_t arg; + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + ret = -EINVAL; + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized.\n"); + goto out; + } + if (arg.mem_type == 0) { + DRM_ERROR("System memory buffers already initialized.\n"); + goto out; + } + ret = drm_bo_init_mm(dev, arg.mem_type, + arg.p_offset, arg.p_size); + +out: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_mm_type_arg arg; drm_buffer_manager_t *bm = &dev->bm; drm_bo_driver_t *driver = dev->driver->bo_driver; + int ret; if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); @@ -2171,59 +2208,78 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user 
*)data, sizeof(arg)); - switch (arg.req.op) { - case mm_init: - ret = -EINVAL; - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - if (!bm->initialized) { - DRM_ERROR("DRM memory manager was not initialized.\n"); - break; - } - if (arg.req.mem_type == 0) { - DRM_ERROR - ("System memory buffers already initialized.\n"); - break; - } - ret = drm_bo_init_mm(dev, arg.req.mem_type, - arg.req.p_offset, arg.req.p_size); - break; - case mm_takedown: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = -EINVAL; - if (!bm->initialized) { - DRM_ERROR("DRM memory manager was not initialized\n"); - break; - } - if (arg.req.mem_type == 0) { - DRM_ERROR("No takedown for System memory buffers.\n"); - break; - } - ret = 0; - if (drm_bo_clean_mm(dev, arg.req.mem_type)) { - DRM_ERROR("Memory manager type %d not clean. " - "Delaying takedown\n", arg.req.mem_type); - } - break; - case mm_lock: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_mm(dev, arg.req.mem_type); - break; - case mm_unlock: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = 0; - break; - default: - DRM_ERROR("Function not implemented yet\n"); + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = -EINVAL; + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized\n"); + goto out; + } + if (arg.mem_type == 0) { + DRM_ERROR("No takedown for System memory buffers.\n"); + goto out; + } + ret = 0; + if (drm_bo_clean_mm(dev, arg.mem_type)) { + DRM_ERROR("Memory manager type %d not clean. 
" + "Delaying takedown\n", arg.mem_type); + } +out: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_mm_type_arg arg; + drm_bo_driver_t *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; } + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = drm_bo_lock_mm(dev, arg.mem_type); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_mm_type_arg arg; + drm_bo_driver_t *driver = dev->driver->bo_driver; + int ret; + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + LOCK_TEST_WITH_RETURN(dev, filp); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = 0; + mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->bm.init_mutex); if (ret) diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 9ac5658c..867cee85 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -184,7 +184,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma, spin_unlock(&mm->page_table_lock); return ret; } - +#if 0 static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { @@ -195,6 +195,7 @@ static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot); return ret; } +#endif static struct page 
*drm_bo_vm_fault(struct vm_area_struct *vma, struct fault_data *data) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index e5788d76..55a3435b 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -119,10 +119,18 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, - DRM_AUTH }, [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + + + [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, + DRM_AUTH }, + [DRM_IOCTL_NR(DRM_IOCTL_MM_TAKEDOWN)] = {drm_mm_takedown_ioctl, + DRM_AUTH }, + [DRM_IOCTL_NR(DRM_IOCTL_MM_LOCK)] = {drm_mm_lock_ioctl, + DRM_AUTH }, + [DRM_IOCTL_NR(DRM_IOCTL_MM_UNLOCK)] = {drm_mm_unlock_ioctl, + DRM_AUTH }, }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index ea300c18..e05b46cc 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -430,6 +430,9 @@ typedef struct drm_bo_driver { extern int drm_bo_ioctl(DRM_IOCTL_ARGS); extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_lock_ioctl(DRM_IOCTL_ARGS); +extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, -- cgit v1.2.3 From 6a62941ecaa7d2b8f14b30920856bfa52aee4775 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sun, 6 May 2007 11:35:11 +1000 Subject: drm/ttm: cleanup most of fence ioctl split out --- linux-core/drm_drv.c | 13 +- linux-core/drm_fence.c | 310 +++++++++++++++++++++++++++++++++++++++++++++++ linux-core/drm_objects.h | 10 +- 3 files changed, 331 insertions(+), 2 deletions(-) 
(limited to 'linux-core') diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 55a3435b..6b98f2c1 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -117,7 +117,7 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, @@ -131,6 +131,17 @@ static drm_ioctl_desc_t drm_ioctls[] = { DRM_AUTH }, [DRM_IOCTL_NR(DRM_IOCTL_MM_UNLOCK)] = {drm_mm_unlock_ioctl, DRM_AUTH }, + + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_CREATE)] = {drm_fence_create_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_DESTROY)] = {drm_fence_destroy_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_REFERENCE)] = {drm_fence_reference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_UNREFERENCE)] = {drm_fence_unreference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_SIGNALED)] = {drm_fence_signaled_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_FLUSH)] = {drm_fence_flush_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_WAIT)] = {drm_fence_wait_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_EMIT)] = {drm_fence_emit_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_FENCE_BUFFERS)] = {drm_fence_buffers_ioctl, DRM_AUTH}, + }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index ce161dc3..fe11e87b 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -659,3 +659,313 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } + +int drm_fence_create_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + 
unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + if (arg.flags & DRM_FENCE_FLAG_EMIT) + LOCK_TEST_WITH_RETURN(dev, filp); + ret = drm_fence_object_create(dev, arg.class, + arg.type, arg.flags, &fence); + if (ret) + return ret; + ret = drm_fence_add_user_object(priv, fence, + arg.flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) { + drm_fence_usage_deref_unlocked(dev, fence); + return ret; + } + + /* + * usage > 0. No need to lock dev->struct_mutex; + */ + + atomic_inc(&fence->usage); + arg.handle = fence->base.hash.key; + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + +int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_user_object_t *uo; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(priv, arg.handle); + if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ret = drm_remove_user_object(priv, uo); + mutex_unlock(&dev->struct_mutex); + return ret; +} + + +int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + drm_user_object_t *uo; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + 
DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + ret = drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); + if (ret) + return ret; + fence = drm_lookup_fence_object(priv, arg.handle); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + + +int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + return drm_user_object_unref(priv, arg.handle, drm_fence_type); +} + +int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + +int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + 
DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_flush(dev, fence, arg.type); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + + +int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_wait(dev, fence, + arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, + 0, arg.type); + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + + +int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + LOCK_TEST_WITH_RETURN(dev, filp); + fence = drm_lookup_fence_object(priv, arg.handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class, + arg.type); + + read_lock_irqsave(&fm->lock, flags); + 
arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} + +int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + int ret; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_arg_t arg; + drm_fence_object_t *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized\n"); + return -EINVAL; + } + LOCK_TEST_WITH_RETURN(dev, filp); + ret = drm_fence_buffer_objects(priv, NULL, arg.flags, + NULL, &fence); + if (ret) + return ret; + ret = drm_fence_add_user_object(priv, fence, + arg.flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) + return ret; + atomic_inc(&fence->usage); + arg.handle = fence->base.hash.key; + + read_lock_irqsave(&fm->lock, flags); + arg.class = fence->class; + arg.type = fence->type; + arg.signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(dev, fence); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return ret; +} diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index e05b46cc..17338da5 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -211,8 +211,16 @@ extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, drm_fence_object_t ** c_fence); extern int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, int shareable); -extern int drm_fence_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_reference_ioctl(DRM_IOCTL_ARGS); +extern int 
drm_fence_unreference_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_flush_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_wait_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_emit_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS); /************************************************** *TTMs */ -- cgit v1.2.3 From 25c51f539f254937d116699e66f8c382d78e71d4 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 8 May 2007 17:53:58 +1000 Subject: drm/ttm: ioctl cleanup for buffer object - user side only This just cleans up the xf86drm.c to what I want and drm.h, I need to fix up the kernel internals to suit these changes now. I've moved to using struct instead of typedefs for the bo and it doesn't look that bad so I'll do the same thing for mm and fence.. --- linux-core/drm_bo.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index f78a6f95..43be21a8 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1784,6 +1784,80 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) return 0; } +int drm_bo_create_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_bo_create_arg_t arg; + unsigned long next; + drm_user_object_t *uo; + drm_buffer_object_t *entry; + int ret = 0; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_bo_lock_test(dev, filp); + if (ret) + goto out; + + ret = drm_buffer_object_create(priv->head->dev, + req->size, req->type, req->mask, + req->hint, req->page_alignment, + req->buffer_start, &entry); + if (ret) + goto out; + + ret = drm_bo_add_user_object(priv, entry, + req->mask & DRM_BO_FLAG_SHAREABLE); + if (ret) { + drm_bo_usage_deref_unlocked(entry); + goto out; + } + + mutex_lock(&entry->mutex); + drm_bo_fill_rep_arg(entry, &rep); + 
mutex_unlock(&entry->mutex); + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); +out: + return 0; +} + +int drm_bo_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_bo_arg_t arg; + drm_bo_arg_request_t *req = &arg.d.req; + drm_bo_arg_reply_t rep; + unsigned long next; + drm_user_object_t *uo; + drm_buffer_object_t *entry; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + rep.ret = 0; + + rep.ret = drm_buffer_object_unmap(priv, req->handle); + + + if (rep.ret == -EAGAIN) + return -EAGAIN; + + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + + return 0; +} + /** *Clean the unfenced list and put on regular LRU. *This is part of the memory manager cleanup and should only be -- cgit v1.2.3 From ae677472af25786fe935309ff1ac287e1610c819 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 8 May 2007 17:55:57 +1000 Subject: drm/ttm: remove old fence ioctl --- linux-core/drm_fence.c | 120 ------------------------------------------------- 1 file changed, 120 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index fe11e87b..3d928016 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -540,126 +540,6 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) return fence; } -int drm_fence_ioctl(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - int ret; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; - drm_fence_object_t *fence; - drm_user_object_t *uo; - unsigned long flags; - ret = 0; - - if (!fm->initialized) { - DRM_ERROR("The DRM driver does not support fencing.\n"); - return -EINVAL; - } - - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - switch (arg.op) { - case drm_fence_create: - if (arg.flags & DRM_FENCE_FLAG_EMIT) - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_object_create(dev, arg.class, - 
arg.type, arg.flags, &fence); - if (ret) - return ret; - ret = drm_fence_add_user_object(priv, fence, - arg.flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) { - drm_fence_usage_deref_unlocked(dev, fence); - return ret; - } - - /* - * usage > 0. No need to lock dev->struct_mutex; - */ - - atomic_inc(&fence->usage); - arg.handle = fence->base.hash.key; - break; - case drm_fence_destroy: - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, arg.handle); - if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - ret = drm_remove_user_object(priv, uo); - mutex_unlock(&dev->struct_mutex); - return ret; - case drm_fence_reference: - ret = - drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); - if (ret) - return ret; - fence = drm_lookup_fence_object(priv, arg.handle); - break; - case drm_fence_unreference: - ret = drm_user_object_unref(priv, arg.handle, drm_fence_type); - return ret; - case drm_fence_signaled: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - break; - case drm_fence_flush: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = drm_fence_object_flush(dev, fence, arg.type); - break; - case drm_fence_wait: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = - drm_fence_object_wait(dev, fence, - arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, - 0, arg.type); - break; - case drm_fence_emit: - LOCK_TEST_WITH_RETURN(dev, filp); - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class, - arg.type); - break; - case drm_fence_buffers: - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized\n"); - return -EINVAL; - } - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_buffer_objects(priv, NULL, arg.flags, - NULL, &fence); - if (ret) - return ret; - ret = 
drm_fence_add_user_object(priv, fence, - arg.flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) - return ret; - atomic_inc(&fence->usage); - arg.handle = fence->base.hash.key; - break; - default: - return -EINVAL; - } - read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); - drm_fence_usage_deref_unlocked(dev, fence); - - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); - return ret; -} - int drm_fence_create_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; -- cgit v1.2.3 From b2a875ba8955cfbf3df2dc1ecb25915a252eef9f Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 8 May 2007 18:25:15 +1000 Subject: ttm: complete drm buffer object ioctl split retain the op operation for validate/fence operations --- linux-core/drm_bo.c | 283 ++++++++++++++++++++++++++++------------------- linux-core/drm_drv.c | 14 ++- linux-core/drm_objects.h | 12 +- 3 files changed, 196 insertions(+), 113 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 43be21a8..be5fd6a8 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1068,7 +1068,7 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, */ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, - drm_bo_arg_reply_t * rep) + struct drm_bo_info_rep *rep) { rep->handle = bo->base.hash.key; rep->flags = bo->mem.flags; @@ -1096,7 +1096,7 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, uint32_t map_flags, unsigned hint, - drm_bo_arg_reply_t * rep) + struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; drm_device_t *dev = priv->head->dev; @@ -1459,7 +1459,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, uint32_t flags, uint32_t mask, uint32_t hint, - drm_bo_arg_reply_t * rep) + 
struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; int ret; @@ -1494,8 +1494,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, return ret; } -static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, - drm_bo_arg_reply_t * rep) +static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle, + struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; @@ -1512,8 +1512,9 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, return 0; } -static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, - uint32_t hint, drm_bo_arg_reply_t * rep) +static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, + uint32_t hint, + struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1652,15 +1653,14 @@ static int drm_bo_lock_test(drm_device_t * dev, struct file *filp) return 0; } -int drm_bo_ioctl(DRM_IOCTL_ARGS) +int drm_bo_op_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t rep; + struct drm_bo_op_arg arg; + struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_info_rep rep; unsigned long next; - drm_user_object_t *uo; - drm_buffer_object_t *entry; + int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); @@ -1675,97 +1675,28 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) continue; } - rep.ret = 0; + ret = 0; switch (req->op) { - case drm_bo_create: - rep.ret = drm_bo_lock_test(dev, filp); - if (rep.ret) - break; - rep.ret = - drm_buffer_object_create(priv->head->dev, - req->size, - req->type, - req->mask, - req->hint, - req->page_alignment, - req->buffer_start, &entry); - if (rep.ret) - break; - - rep.ret = - drm_bo_add_user_object(priv, entry, - req-> - mask & - DRM_BO_FLAG_SHAREABLE); - if (rep.ret) - drm_bo_usage_deref_unlocked(entry); - - if (rep.ret) - break; - - mutex_lock(&entry->mutex); - drm_bo_fill_rep_arg(entry, &rep); - mutex_unlock(&entry->mutex); - 
break; - case drm_bo_unmap: - rep.ret = drm_buffer_object_unmap(priv, req->handle); - break; - case drm_bo_map: - rep.ret = drm_buffer_object_map(priv, req->handle, - req->mask, - req->hint, &rep); - break; - case drm_bo_destroy: - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, req->handle); - if (!uo || (uo->type != drm_buffer_type) - || uo->owner != priv) { - mutex_unlock(&dev->struct_mutex); - rep.ret = -EINVAL; - break; - } - rep.ret = drm_remove_user_object(priv, uo); - mutex_unlock(&dev->struct_mutex); - break; - case drm_bo_reference: - rep.ret = drm_user_object_ref(priv, req->handle, - drm_buffer_type, &uo); - if (rep.ret) - break; - - rep.ret = drm_bo_handle_info(priv, req->handle, &rep); - break; - case drm_bo_unreference: - rep.ret = drm_user_object_unref(priv, req->handle, - drm_buffer_type); - break; case drm_bo_validate: - rep.ret = drm_bo_lock_test(dev, filp); - - if (rep.ret) + ret = drm_bo_lock_test(dev, filp); + if (ret) break; - rep.ret = - drm_bo_handle_validate(priv, req->handle, req->mask, - req->arg_handle, req->hint, - &rep); + ret = drm_bo_handle_validate(priv, req->bo_req.handle, + req->bo_req.mask, + req->arg_handle, + req->bo_req.hint, + &rep); break; case drm_bo_fence: - rep.ret = drm_bo_lock_test(dev, filp); - if (rep.ret) + ret = drm_bo_lock_test(dev, filp); + if (ret) break; - /**/ break; - case drm_bo_info: - rep.ret = drm_bo_handle_info(priv, req->handle, &rep); - break; - case drm_bo_wait_idle: - rep.ret = drm_bo_handle_wait(priv, req->handle, - req->hint, &rep); break; case drm_bo_ref_fence: - rep.ret = -EINVAL; + ret = -EINVAL; DRM_ERROR("Function is not implemented yet.\n"); default: - rep.ret = -EINVAL; + ret = -EINVAL; } next = arg.next; @@ -1773,11 +1704,12 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) * A signal interrupted us. Make sure the ioctl is restartable. 
*/ - if (rep.ret == -EAGAIN) + if (ret == -EAGAIN) return -EAGAIN; arg.handled = 1; - arg.d.rep = rep; + arg.d.rep.ret = ret; + arg.d.rep.bo_info = rep; DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); data = next; } while (data); @@ -1787,9 +1719,9 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) int drm_bo_create_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_bo_create_arg_t arg; - unsigned long next; - drm_user_object_t *uo; + struct drm_bo_create_arg arg; + struct drm_bo_create_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; drm_buffer_object_t *entry; int ret = 0; @@ -1819,24 +1751,48 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) } mutex_lock(&entry->mutex); - drm_bo_fill_rep_arg(entry, &rep); + drm_bo_fill_rep_arg(entry, rep); mutex_unlock(&entry->mutex); DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); out: - return 0; + return ret; } -int drm_bo_ioctl(DRM_IOCTL_ARGS) + +int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t rep; - unsigned long next; + struct drm_bo_handle_arg arg; drm_user_object_t *uo; - drm_buffer_object_t *entry; + int ret = 0; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(priv, arg.handle); + if (!uo || (uo->type != drm_buffer_type) || uo->owner != priv) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ret = drm_remove_user_object(priv, uo); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int drm_bo_map_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1844,20 +1800,125 @@ int 
drm_bo_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - rep.ret = 0; + ret = drm_buffer_object_map(priv, req->handle, req->mask, + req->hint, rep); + if (ret) + return ret; - rep.ret = drm_buffer_object_unmap(priv, req->handle); + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} +int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_handle_arg arg; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_buffer_object_unmap(priv, arg.handle); + return ret; +} - if (rep.ret == -EAGAIN) - return -EAGAIN; +int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + drm_user_object_t *uo; + int ret; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_user_object_ref(priv, req->handle, + drm_buffer_type, &uo); + if (ret) + return ret; + ret = drm_bo_handle_info(priv, req->handle, rep); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_handle_arg arg; + int ret = 0; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_user_object_unref(priv, arg.handle, drm_buffer_type); + return ret; +} + +int drm_bo_info_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_reference_info_arg arg; + struct drm_bo_handle_arg *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret; + + if 
(!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_bo_handle_info(priv, req->handle, rep); + if (ret) + return ret; DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; +} + +int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct drm_bo_map_wait_idle_arg arg; + struct drm_bo_info_req *req = &arg.d.req; + struct drm_bo_info_rep *rep = &arg.d.rep; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + + ret = drm_bo_handle_wait(priv, req->handle, + req->hint, rep); + if (ret) + return ret; + + DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } + + /** *Clean the unfenced list and put on regular LRU. *This is part of the memory manager cleanup and should only be diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 6b98f2c1..b931ce2f 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -118,7 +118,7 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, + // [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, @@ -142,6 +142,18 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_FENCE_EMIT)] = {drm_fence_emit_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_IOCTL_FENCE_BUFFERS)] = {drm_fence_buffers_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_CREATE)] = {drm_bo_create_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_DESTROY)] = {drm_bo_destroy_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_MAP)] = {drm_bo_map_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_UNMAP)] = {drm_bo_unmap_ioctl, 
DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_REFERENCE)] = {drm_bo_reference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_UNREFERENCE)] = {drm_bo_unreference_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_OP)] = {drm_bo_op_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_INFO)] = {drm_bo_info_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_IOCTL_BO_WAIT_IDLE)] = {drm_bo_wait_idle_ioctl, DRM_AUTH}, + + + }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 17338da5..61059a05 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -436,7 +436,17 @@ typedef struct drm_bo_driver { * buffer objects (drm_bo.c) */ -extern int drm_bo_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_create_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_map_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_reference_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_info_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_op_ioctl(DRM_IOCTL_ARGS); + + extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); extern int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS); extern int drm_mm_lock_ioctl(DRM_IOCTL_ARGS); -- cgit v1.2.3 From b6b5df24b962c94433afe4d8665b5f145bfa1ad3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 12 Jun 2007 12:21:38 +0200 Subject: Try to make buffer object / fence object ioctl args 64-bit safe. Introduce tile members for future tiled buffer support. Allow user-space to explicitly define a fence-class. Remove the implicit fence-class mechanism. 64-bit wide buffer object flag member. 
--- linux-core/drm_bo.c | 79 ++++++++++++++++++++++++++++++++++++++---------- linux-core/drm_compat.c | 6 ++-- linux-core/drm_objects.h | 8 ++--- linux-core/i915_buffer.c | 5 ++- linux-core/via_buffer.c | 5 ++- 5 files changed, 74 insertions(+), 29 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index be5fd6a8..b6a31943 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -195,8 +195,8 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { drm_bo_mem_reg_t *old_mem = &bo->mem; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; *old_mem = *mem; mem->mm_node = NULL; @@ -871,7 +871,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, EXPORT_SYMBOL(drm_bo_mem_space); static int drm_bo_new_mask(drm_buffer_object_t * bo, - uint32_t new_mask, uint32_t hint) + uint64_t new_mask, uint32_t hint) { uint32_t new_props; @@ -1343,7 +1343,8 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) return 0; } - DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask); + DRM_ERROR("Illegal fake buffer flags 0x%016llx\n", + (unsigned long long) mem->mask); return -EINVAL; } @@ -1352,22 +1353,45 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) */ static int drm_buffer_object_validate(drm_buffer_object_t * bo, + uint32_t fence_class, int move_unfenced, int no_wait) { drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; drm_bo_driver_t *driver = dev->driver->bo_driver; + uint32_t ftype; int ret; - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, - bo->mem.flags); - ret = - driver->fence_type(bo, &bo->fence_class, &bo->fence_type); + DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n", + (unsigned long long) bo->mem.mask, + (unsigned long long) bo->mem.flags); + + ret = 
driver->fence_type(bo, &ftype); + if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); return ret; } + /* + * We're switching command submission mechanism, + * or cannot simply rely on the hardware serializing for us. + * + * Wait for buffer idle. + */ + + if ((fence_class != bo->fence_class) || + ((ftype ^ bo->fence_type) & bo->fence_type)) { + + ret = drm_bo_wait(bo, 0, 0, no_wait); + + if (ret) + return ret; + + } + + bo->fence_class = fence_class; + bo->fence_type = ftype; ret = drm_bo_wait_unmapped(bo, no_wait); if (ret) return ret; @@ -1457,8 +1481,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return 0; } -static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, - uint32_t flags, uint32_t mask, uint32_t hint, +static int drm_bo_handle_validate(drm_file_t * priv, + uint32_t handle, + uint32_t fence_class, + uint64_t flags, uint64_t mask, uint32_t hint, struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; @@ -1482,7 +1508,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, goto out; ret = - drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE), + drm_buffer_object_validate(bo, fence_class, + !(hint & DRM_BO_HINT_DONT_FENCE), no_wait); drm_bo_fill_rep_arg(bo, rep); @@ -1544,7 +1571,7 @@ static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, int drm_buffer_object_create(drm_device_t *dev, unsigned long size, drm_bo_type_t type, - uint32_t mask, + uint64_t mask, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, @@ -1596,8 +1623,8 @@ int drm_buffer_object_create(drm_device_t *dev, bo->buffer_start = buffer_start; } bo->priv_flags = 0; - bo->mem.flags = 0; - bo->mem.mask = 0; + bo->mem.flags = 0ULL; + bo->mem.mask = 0ULL; atomic_inc(&bm->count); ret = drm_bo_new_mask(bo, mask, hint); @@ -1611,7 +1638,7 @@ int drm_buffer_object_create(drm_device_t *dev, if (ret) goto out_err; } - ret = drm_buffer_object_validate(bo, 0, hint & 
DRM_BO_HINT_DONT_BLOCK); + ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); if (ret) goto out_err; @@ -1682,8 +1709,9 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) if (ret) break; ret = drm_bo_handle_validate(priv, req->bo_req.handle, + req->bo_req.fence_class, + req->bo_req.flags, req->bo_req.mask, - req->arg_handle, req->bo_req.hint, &rep); break; @@ -2305,6 +2333,25 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); ret = -EINVAL; + if (arg.magic != DRM_BO_INIT_MAGIC) { + DRM_ERROR("You are using an old libdrm that is not compatible with\n" + "\tthe kernel DRM module. Please upgrade your libdrm.\n"); + return -EINVAL; + } + if (arg.major != DRM_BO_INIT_MAJOR) { + DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" + "\tversion don't match. Got %d, expected %d,\n", + arg.major, DRM_BO_INIT_MAJOR); + return -EINVAL; + } + if (arg.minor > DRM_BO_INIT_MINOR) { + DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n" + "\tlibdrm buffer object interface version is %d.%d.\n" + "\tkernel DRM buffer object interface version is %d.%d\n", + arg.major, arg.minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); + return -EINVAL; + } + mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); if (!bm->initialized) { diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 867cee85..d47b92e5 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -184,7 +184,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma, spin_unlock(&mm->page_table_lock); return ret; } -#if 0 + static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { @@ -195,9 +195,9 @@ static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot); return ret; } -#endif -static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, + +static struct page 
*drm_bo_vm_fault(struct vm_area_struct *vma, struct fault_data *data) { unsigned long address = data->address; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 61059a05..b40320aa 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -321,8 +321,8 @@ typedef struct drm_bo_mem_reg { unsigned long num_pages; uint32_t page_alignment; uint32_t mem_type; - uint32_t flags; - uint32_t mask; + uint64_t flags; + uint64_t mask; } drm_bo_mem_reg_t; typedef struct drm_buffer_object { @@ -423,8 +423,8 @@ typedef struct drm_bo_driver { uint32_t num_mem_busy_prio; drm_ttm_backend_t *(*create_ttm_backend_entry) (struct drm_device * dev); - int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type); - int (*invalidate_caches) (struct drm_device * dev, uint32_t flags); + int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type); + int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); int (*init_mem_type) (struct drm_device * dev, uint32_t type, drm_mem_type_manager_t * man); uint32_t(*evict_mask) (struct drm_buffer_object *bo); diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 8589f467..2850fb94 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -38,9 +38,8 @@ drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev) return drm_agp_init_ttm(dev); } -int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) +int i915_fence_types(drm_buffer_object_t *bo, uint32_t * type) { - *class = 0; if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; else @@ -48,7 +47,7 @@ int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) return 0; } -int i915_invalidate_caches(drm_device_t * dev, uint32_t flags) +int i915_invalidate_caches(drm_device_t * dev, uint64_t flags) { /* * FIXME: Only emit once per batchbuffer submission. 
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index ebc8c371..86883998 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -37,14 +37,13 @@ drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev) return drm_agp_init_ttm(dev); } -int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) +int via_fence_types(drm_buffer_object_t *bo, uint32_t * type) { - *class = 0; *type = 3; return 0; } -int via_invalidate_caches(drm_device_t * dev, uint32_t flags) +int via_invalidate_caches(drm_device_t * dev, uint64_t flags) { /* * FIXME: Invalidate texture caches here. -- cgit v1.2.3 From f984b1b8d17f285dfacb593702178f1eb2fdb4ac Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 12 Jun 2007 12:30:33 +0200 Subject: Fix some obvious bugs. --- linux-core/drm_bo.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b6a31943..a1a27d1e 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1716,13 +1716,13 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) &rep); break; case drm_bo_fence: - ret = drm_bo_lock_test(dev, filp); - if (ret) - break; + ret = -EINVAL; + DRM_ERROR("Function is not implemented yet.\n"); break; case drm_bo_ref_fence: ret = -EINVAL; DRM_ERROR("Function is not implemented yet.\n"); + break; default: ret = -EINVAL; } -- cgit v1.2.3 From 5156f1c897142171e78d0ea2c45a3aecb581fffa Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 13 Jun 2007 15:19:30 +0200 Subject: Fix fence object deref race. 
--- linux-core/drm_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 1c7013b3..bcb5c95d 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -338,7 +338,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) { - drm_fence_usage_deref_locked(dev, bo->fence); + drm_fence_usage_deref_unlocked(dev, bo->fence); bo->fence = NULL; } -- cgit v1.2.3 From 62082ab3e63f6f474655da98b710e453b4124ed1 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 13 Jun 2007 15:38:59 +0200 Subject: Make sure we read fence->signaled while spinlocked. --- linux-core/drm_bo.c | 11 ++++++----- linux-core/drm_fence.c | 18 ++++++------------ linux-core/drm_objects.h | 4 +++- 3 files changed, 15 insertions(+), 18 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index bcb5c95d..1c0eebd0 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -268,7 +268,7 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, if (fence) { drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(fence, bo->fence_type)) { + if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, fence); bo->fence = NULL; return 0; @@ -337,7 +337,8 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) { + if (bo->fence && drm_fence_object_signaled(dev, bo->fence, + bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, bo->fence); bo->fence = NULL; } @@ -944,7 +945,7 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { drm_device_t *dev = 
bo->dev; - if (drm_fence_object_signaled(fence, bo->fence_type)) { + if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, fence); bo->fence = NULL; return 0; @@ -966,13 +967,13 @@ static int drm_bo_busy(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(fence, bo->fence_type)) { + if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, fence); bo->fence = NULL; return 0; } drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE); - if (drm_fence_object_signaled(fence, bo->fence_type)) { + if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { drm_fence_usage_deref_unlocked(dev, fence); bo->fence = NULL; return 0; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index ce161dc3..70baad9f 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -164,7 +164,7 @@ static void drm_fence_object_destroy(drm_file_t * priv, drm_fence_usage_deref_locked(dev, fence); } -static int fence_signaled(drm_device_t * dev, +int drm_fence_object_signaled(drm_device_t * dev, drm_fence_object_t * fence, uint32_t mask, int poke_flush) { @@ -200,12 +200,6 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, } } -int drm_fence_object_signaled(drm_fence_object_t * fence, - uint32_t type) -{ - return ((fence->signaled & type) == type); -} - int drm_fence_object_flush(drm_device_t * dev, drm_fence_object_t * fence, uint32_t type) @@ -298,13 +292,13 @@ static int drm_fence_lazy_wait(drm_device_t *dev, do { DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ, - (signaled = fence_signaled(dev, fence, mask, 1))); + (signaled = drm_fence_object_signaled(dev, fence, mask, 1))); if (signaled) return 0; if (time_after_eq(jiffies, _end)) break; } while (ret == -EINTR && ignore_signals); - if (fence_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(dev, fence, mask, 0)) 
return 0; if (time_after_eq(jiffies, _end)) ret = -EBUSY; @@ -334,7 +328,7 @@ int drm_fence_object_wait(drm_device_t * dev, return -EINVAL; } - if (fence_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(dev, fence, mask, 0)) return 0; _end = jiffies + 3 * DRM_HZ; @@ -365,7 +359,7 @@ int drm_fence_object_wait(drm_device_t * dev, return ret; } } - if (drm_fence_object_signaled(fence, mask)) + if (drm_fence_object_signaled(dev, fence, mask, 0)) return 0; /* @@ -377,7 +371,7 @@ int drm_fence_object_wait(drm_device_t * dev, #endif do { schedule(); - signaled = fence_signaled(dev, fence, mask, 1); + signaled = drm_fence_object_signaled(dev, fence, mask, 1); } while (!signaled && !time_after_eq(jiffies, _end)); if (!signaled) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 03ea927e..42c8e536 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -198,7 +198,9 @@ extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, uint32_t sequence); extern int drm_fence_object_flush(struct drm_device *dev, drm_fence_object_t * fence, uint32_t type); -extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type); +extern int drm_fence_object_signaled(struct drm_device *dev, + drm_fence_object_t * fence, + uint32_t type, int flush); extern void drm_fence_usage_deref_locked(struct drm_device *dev, drm_fence_object_t * fence); extern void drm_fence_usage_deref_unlocked(struct drm_device *dev, -- cgit v1.2.3 From e1b8eabeee354822fc0a413dd097210b621eb73a Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 14 Jun 2007 11:52:38 +0200 Subject: Locking fixes and instrumentation. 
--- linux-core/drm_bo.c | 31 +++++++++++++++++++++++++++++++ linux-core/drm_fence.c | 23 +++++++++++------------ linux-core/drm_object.c | 9 +++++++++ linux-core/drm_objects.h | 8 ++++++++ 4 files changed, 59 insertions(+), 12 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 1c0eebd0..b9a261d5 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -67,6 +67,9 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) { drm_mem_type_manager_t *man; + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + DRM_ASSERT_LOCKED(&bo->mutex); + man = &bo->dev->bm.man[bo->pinned_mem_type]; list_add_tail(&bo->pinned_lru, &man->pinned); } @@ -75,6 +78,8 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) { drm_mem_type_manager_t *man; + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) || bo->mem.mem_type != bo->pinned_mem_type) { man = &bo->dev->bm.man[bo->mem.mem_type]; @@ -134,6 +139,8 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) int ret = 0; bo->ttm = NULL; + DRM_ASSERT_LOCKED(&bo->mutex); + switch (bo->type) { case drm_bo_type_dc: bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); @@ -266,6 +273,8 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, drm_fence_object_t *fence = bo->fence; int ret; + DRM_ASSERT_LOCKED(&bo->mutex); + if (fence) { drm_device_t *dev = bo->dev; if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { @@ -331,6 +340,8 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + atomic_inc(&bo->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&bo->mutex); @@ -393,6 +404,8 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) drm_device_t *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + if 
(list_empty(&bo->lru) && bo->mem.mm_node == NULL && list_empty(&bo->pinned_lru) && bo->pinned_node == NULL && list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) { @@ -415,6 +428,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) atomic_dec(&bm->count); + BUG_ON(!list_empty(&bo->base.list)); drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); return; @@ -491,6 +505,8 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) { + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + if (atomic_dec_and_test(&bo->usage)) { drm_bo_destroy_locked(bo); } @@ -501,6 +517,8 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) drm_buffer_object_t *bo = drm_user_object_entry(uo, drm_buffer_object_t, base); + DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + drm_bo_takedown_vm_locked(bo); drm_bo_usage_deref_locked(bo); } @@ -1462,11 +1480,14 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, uint32_t flags, uint32_t mask, uint32_t hint, drm_bo_arg_reply_t * rep) { + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); + mutex_unlock(&dev->struct_mutex); if (!bo) { return -EINVAL; } @@ -1498,9 +1519,13 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, drm_bo_arg_reply_t * rep) { + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; + mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!bo) { return -EINVAL; } @@ -1520,7 +1545,11 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; + struct drm_device *dev = priv->head->dev; + mutex_lock(&dev->struct_mutex); bo = 
drm_lookup_buffer_object(priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!bo) { return -EINVAL; } @@ -2319,6 +2348,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) drm_local_map_t *map; drm_device_t *dev = bo->dev; + DRM_ASSERT_LOCKED(&dev->struct_mutex); if (list->user_token) { drm_ht_remove_item(&dev->map_hash, &list->hash); list->user_token = 0; @@ -2344,6 +2374,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) drm_local_map_t *map; drm_device_t *dev = bo->dev; + DRM_ASSERT_LOCKED(&dev->struct_mutex); list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); if (!list->map) return -ENOMEM; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 70baad9f..b5fc2235 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -129,11 +129,14 @@ void drm_fence_usage_deref_locked(drm_device_t * dev, { drm_fence_manager_t *fm = &dev->fm; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + if (atomic_dec_and_test(&fence->usage)) { drm_fence_unring(dev, &fence->ring); DRM_DEBUG("Destroyed a fence object 0x%08lx\n", fence->base.hash.key); atomic_dec(&fm->count); + BUG_ON(!list_empty(&fence->base.list)); drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); } } @@ -148,6 +151,7 @@ void drm_fence_usage_deref_unlocked(drm_device_t * dev, if (atomic_read(&fence->usage) == 0) { drm_fence_unring(dev, &fence->ring); atomic_dec(&fm->count); + BUG_ON(!list_empty(&fence->base.list)); drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); } mutex_unlock(&dev->struct_mutex); @@ -448,15 +452,16 @@ int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, mutex_lock(&dev->struct_mutex); ret = drm_add_user_object(priv, &fence->base, shareable); - mutex_unlock(&dev->struct_mutex); if (ret) - return ret; + goto out; + atomic_inc(&fence->usage); fence->base.type = drm_fence_type; fence->base.remove = &drm_fence_object_destroy; DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key); - return 0; +out: + 
mutex_unlock(&dev->struct_mutex); + return ret; } - EXPORT_SYMBOL(drm_fence_add_user_object); int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, @@ -466,7 +471,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, int ret; drm_fence_manager_t *fm = &dev->fm; - fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE); + fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); if (!fence) return -ENOMEM; ret = drm_fence_object_init(dev, class, type, flags, fence); @@ -566,13 +571,8 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) drm_fence_usage_deref_unlocked(dev, fence); return ret; } - - /* - * usage > 0. No need to lock dev->struct_mutex; - */ - - atomic_inc(&fence->usage); arg.handle = fence->base.hash.key; + break; case drm_fence_destroy: mutex_lock(&dev->struct_mutex); @@ -637,7 +637,6 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) DRM_FENCE_FLAG_SHAREABLE); if (ret) return ret; - atomic_inc(&fence->usage); arg.handle = fence->base.hash.key; break; default: diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 03906034..567a7d2b 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -36,6 +36,8 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, drm_device_t *dev = priv->head->dev; int ret; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + atomic_set(&item->refcount, 1); item->shareable = shareable; item->owner = priv; @@ -56,6 +58,8 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) int ret; drm_user_object_t *item; + DRM_ASSERT_LOCKED(&dev->struct_mutex); + ret = drm_ht_find_item(&dev->object_hash, key, &hash); if (ret) { return NULL; @@ -88,6 +92,8 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) { + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); + if (item->owner != priv) { DRM_ERROR("Cannot destroy object not owned by you.\n"); return -EINVAL; 
@@ -125,6 +131,7 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, drm_ref_object_t *item; drm_open_hash_t *ht = &priv->refd_object_hash[ref_action]; + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); if (!referenced_object->shareable && priv != referenced_object->owner) { DRM_ERROR("Not allowed to reference this object\n"); return -EINVAL; @@ -181,6 +188,7 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, drm_hash_item_t *hash; int ret; + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); ret = drm_ht_find_item(&priv->refd_object_hash[ref_action], (unsigned long)referenced_object, &hash); if (ret) @@ -213,6 +221,7 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action]; drm_ref_t unref_action; + DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); unref_action = item->unref_action; if (atomic_dec_and_test(&item->refcount)) { ret = drm_ht_remove_item(ht, &item->hash); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 42c8e536..59c8902d 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -473,4 +473,12 @@ extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, uint32_t fence_flags, drm_bo_mem_reg_t * new_mem); +#ifdef CONFIG_DEBUG_MUTEXES +#define DRM_ASSERT_LOCKED(_mutex) \ + BUG_ON(!mutex_is_locked(_mutex) || \ + ((_mutex)->owner != current_thread_info())) +#else +#define DRM_ASSERT_LOCKED(_mutex) +#endif + #endif -- cgit v1.2.3 From d34b2c7b9e108766b1d67cd23b8f7ecc77835ac7 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 15 Jun 2007 10:21:31 +0200 Subject: Fix refcounting / lock race. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reported by Steve Wilkins / Michel Dänzer. 
--- linux-core/drm_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b9a261d5..2a16ba5a 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -618,6 +618,7 @@ int drm_fence_buffer_objects(drm_file_t * priv, if (entry->fence) drm_fence_usage_deref_locked(dev, entry->fence); entry->fence = fence; + atomic_inc(&fence->usage); DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&entry->event_queue); @@ -627,7 +628,6 @@ int drm_fence_buffer_objects(drm_file_t * priv, drm_bo_usage_deref_locked(entry); l = f_list.next; } - atomic_add(count, &fence->usage); DRM_DEBUG("Fenced %d buffers\n", count); out: mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From 3ee31a1f356df4b81e3ba226a416627fd3b70e07 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 15 Jun 2007 10:31:32 +0200 Subject: Indentation fixes. --- linux-core/drm_bo.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 2a16ba5a..f1ca0b44 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1480,7 +1480,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, uint32_t flags, uint32_t mask, uint32_t hint, drm_bo_arg_reply_t * rep) { - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1519,7 +1519,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, drm_bo_arg_reply_t * rep) { - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; mutex_lock(&dev->struct_mutex); @@ -1541,11 +1541,11 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, static int drm_bo_handle_wait(drm_file_t * priv, uint32_t 
handle, uint32_t hint, drm_bo_arg_reply_t * rep) { + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; - struct drm_device *dev = priv->head->dev; mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From 84bea383538df83c049680497ba2179e50d07ca3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 15 Jun 2007 10:35:52 +0200 Subject: Fix i915 sequence mask. --- linux-core/i915_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 7fdb0839..4c35b4c3 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -43,7 +43,7 @@ static drm_fence_driver_t i915_fence_driver = { .num_classes = 1, .wrap_diff = (1 << 30), .flush_diff = (1 << 29), - .sequence_mask = 0xffffffffU, + .sequence_mask = 0x7fffffffU, .lazy_capable = 1, .emit = i915_fence_emit_sequence, .poke_flush = i915_poke_flush, -- cgit v1.2.3 From 3d5d41fa9823cf44138c8f4bc954bca80539d74e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Fri, 15 Jun 2007 17:13:11 +0200 Subject: i915: Fix handling of breadcrumb counter wraparounds. 
--- linux-core/i915_drv.c | 6 +++--- linux-core/i915_fence.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 4c35b4c3..49437066 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -41,9 +41,9 @@ static struct pci_device_id pciidlist[] = { #ifdef I915_HAVE_FENCE static drm_fence_driver_t i915_fence_driver = { .num_classes = 1, - .wrap_diff = (1 << 30), - .flush_diff = (1 << 29), - .sequence_mask = 0x7fffffffU, + .wrap_diff = (1U << (BREADCRUMB_BITS - 1)), + .flush_diff = (1U << (BREADCRUMB_BITS - 2)), + .sequence_mask = BREADCRUMB_MASK, .lazy_capable = 1, .emit = i915_fence_emit_sequence, .poke_flush = i915_poke_flush, diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 88daa57c..00873485 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -61,7 +61,7 @@ static void i915_perform_flush(drm_device_t * dev) * First update fences with the current breadcrumb. */ - diff = sequence - fc->last_exe_flush; + diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK; if (diff < driver->wrap_diff && diff != 0) { drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE); } -- cgit v1.2.3 From 638ebbab54a48004c2e1d9cc5498e1dec976911e Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 18 Jun 2007 12:45:20 +1000 Subject: fix radeon setparam on 32/64 systems, harder. Commit 9b01bd5b284bbf519b726b39f1352023cb5e9e69 introduced a compat_ioctl handler for RADEON_SETPARAM, the sole purpose of which was to handle the fact that on i386, alignof(uint64_t)==4. Unfortunately, this handler was installed for _all_ 64-bit architectures, instead of only x86_64 and ia64. And thus it breaks 32-bit compatibility on every other arch, where 64-bit integers are aligned to 8 bytes in 32-bit mode just the same as in 64-bit mode. 
Arnd has a cunning plan to use 'compat_u64' with appropriate alignment attributes according to the 32-bit ABI, but for now let's just make the compat_radeon_cp_setparam routine entirely disappear on 64-bit machines whose 32-bit compat support isn't for i386. It would be a no-op with compat_u64 anyway. Signed-off-by: David Woodhouse --- linux-core/radeon_ioc32.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/radeon_ioc32.c b/linux-core/radeon_ioc32.c index 1be50bd9..bc8aa35a 100644 --- a/linux-core/radeon_ioc32.c +++ b/linux-core/radeon_ioc32.c @@ -349,6 +349,8 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request); } +/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ +#if defined (CONFIG_X86_64) || defined(CONFIG_IA64) typedef struct drm_radeon_setparam32 { int param; u64 value; @@ -373,7 +375,9 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, return drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); } - +#else +#define compat_radeon_cp_setparam NULL +#endif /* X86_64 || IA64 */ drm_ioctl_compat_t *radeon_compat_ioctls[] = { [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, -- cgit v1.2.3 From f2e64d527699751d6b64698495ae1d48eeee6cf7 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:56:01 +1000 Subject: nouveau: NV4X PFIFO engtab functions --- linux-core/Makefile.kernel | 1 + linux-core/nv40_fifo.c | 1 + 2 files changed, 2 insertions(+) create mode 120000 linux-core/nv40_fifo.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 6f5b021b..3e78b6d7 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -25,6 +25,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ + 
nv40_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ nv40_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv40_fifo.c b/linux-core/nv40_fifo.c new file mode 120000 index 00000000..cc71e7a4 --- /dev/null +++ b/linux-core/nv40_fifo.c @@ -0,0 +1 @@ +../shared-core/nv40_fifo.c \ No newline at end of file -- cgit v1.2.3 From 05d86d950a10b77ffaa708e9d89b2a87c11fed01 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:57:09 +1000 Subject: nouveau: NV04 PFIFO engtab functions --- linux-core/Makefile.kernel | 2 +- linux-core/nv04_fifo.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 120000 linux-core/nv04_fifo.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 3e78b6d7..45d2dc46 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -25,7 +25,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ - nv40_fifo.o \ + nv04_fifo.o nv40_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ nv40_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv04_fifo.c b/linux-core/nv04_fifo.c new file mode 120000 index 00000000..d10beb19 --- /dev/null +++ b/linux-core/nv04_fifo.c @@ -0,0 +1 @@ +../shared-core/nv04_fifo.c \ No newline at end of file -- cgit v1.2.3 From 341bc7820749024e09275de6e689b10c2908689a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 18:58:14 +1000 Subject: nouveau: NV1X/2X/3X PFIFO engtab functions Earlier NV1X chips use the NV04 code, see previous commits about NV10 RAMFC entry size. 
--- linux-core/Makefile.kernel | 2 +- linux-core/nv10_fifo.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 120000 linux-core/nv10_fifo.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 45d2dc46..9427a04b 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -25,7 +25,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ - nv04_fifo.o nv40_fifo.o \ + nv04_fifo.o nv10_fifo.o nv40_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ nv40_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv10_fifo.c b/linux-core/nv10_fifo.c new file mode 120000 index 00000000..8630ad04 --- /dev/null +++ b/linux-core/nv10_fifo.c @@ -0,0 +1 @@ +../shared-core/nv10_fifo.c \ No newline at end of file -- cgit v1.2.3 From 7af9d670371de868f0642148fe2d594bc9a7dea3 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:05:29 -0700 Subject: Initial XP10 code drop from XGI. 
See attachment 10246 on https://bugs.freedesktop.org/show_bug.cgi?id=5921 --- linux-core/xgi_cmdlist.c | 348 ++++++++++ linux-core/xgi_cmdlist.h | 79 +++ linux-core/xgi_drv.c | 1610 ++++++++++++++++++++++++++++++++++++++++++++++ linux-core/xgi_drv.h | 364 +++++++++++ linux-core/xgi_fb.c | 528 +++++++++++++++ linux-core/xgi_fb.h | 71 ++ linux-core/xgi_linux.h | 596 +++++++++++++++++ linux-core/xgi_misc.c | 657 +++++++++++++++++++ linux-core/xgi_misc.h | 49 ++ linux-core/xgi_pcie.c | 1060 ++++++++++++++++++++++++++++++ linux-core/xgi_pcie.h | 73 +++ linux-core/xgi_regs.h | 410 ++++++++++++ linux-core/xgi_types.h | 68 ++ 13 files changed, 5913 insertions(+) create mode 100644 linux-core/xgi_cmdlist.c create mode 100644 linux-core/xgi_cmdlist.h create mode 100644 linux-core/xgi_drv.c create mode 100644 linux-core/xgi_drv.h create mode 100644 linux-core/xgi_fb.c create mode 100644 linux-core/xgi_fb.h create mode 100644 linux-core/xgi_linux.h create mode 100644 linux-core/xgi_misc.c create mode 100644 linux-core/xgi_misc.h create mode 100644 linux-core/xgi_pcie.c create mode 100644 linux-core/xgi_pcie.h create mode 100644 linux-core/xgi_regs.h create mode 100644 linux-core/xgi_types.h (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c new file mode 100644 index 00000000..024b021c --- /dev/null +++ b/linux-core/xgi_cmdlist.c @@ -0,0 +1,348 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + + + +U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] = +{ + 0x10000000, // 3D Type Begin, Invalid + 0x80000004, // Length = 4; + 0x00000000, + 0x00000000 +}; + +U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = +{ + FLUSH_2D, + FLUSH_2D, + FLUSH_2D, + FLUSH_2D +}; + +xgi_cmdring_info_t s_cmdring; + +static void addFlush2D(xgi_info_t *info); +static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo); +static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter); +static void xgi_cmdlist_reset(void); + +int xgi_cmdlist_initialize(xgi_info_t *info, U32 size) +{ + //xgi_mem_req_t mem_req; + xgi_mem_alloc_t mem_alloc; + + //mem_req.size = size; + + xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc); + + if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) + { + return -1; + } + + s_cmdring._cmdRingSize = mem_alloc.size; + s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; + s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr; + s_cmdring._lastBatchStartAddr = 0; + s_cmdring._cmdRingOffset = 0; + + return 1; +} + +void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t *pCmdInfo) +{ + U32 beginPort; + /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ + + /* Jong 05/25/2006 */ + /* return; */ + + beginPort = getCurBatchBeginPort(pCmdInfo); + XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); + + /* Jong 05/25/2006 */ + /* return; */ + + if (s_cmdring._lastBatchStartAddr == 0) + { + U32 portOffset; + + /* Jong 06/13/2006; remove marked for system hang test */ + /* xgi_waitfor_pci_idle(info); */ + + /* Jong 06132006; BASE_3D_ENG=0x2800 */ + /* beginPort: 2D: 0x30 */ + portOffset = BASE_3D_ENG + beginPort; + + // Enable PCI Trigger Mode + XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); + + /* Jong 05/25/2006 */ + /* return; */ + + 
/* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */ + XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", M2REG_AUTO_LINK_SETTING_ADDRESS); + XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", M2REG_CLEAR_COUNTERS_MASK); + XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); + XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", M2REG_PCI_TRIGGER_MODE_MASK); + + /* Jong 06/14/2006; 0x400001a */ + XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK); + dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | + M2REG_CLEAR_COUNTERS_MASK | + 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + /* Jong 05/25/2006 */ + XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); + /* return; */ /* OK */ + + /* Jong 06/14/2006; 0x400000a */ + XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK); + dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | + 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + // Send PCI begin command + XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); + /* return; */ + + XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", portOffset); + XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); + + /* beginPort = 48; */ + /* 0xc100000 */ + dwWriteReg(portOffset, (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); + XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort<<22)); + XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); + XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", pCmdInfo->_curDebugID); + XGI_INFO("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + 
pCmdInfo->_curDebugID=0x%lx \n", (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); + XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); + /* return; */ /* OK */ + + /* 0x80000024 */ + dwWriteReg(portOffset+4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); + XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", BEGIN_LINK_ENABLE_MASK); + XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", pCmdInfo->_firstSize); + XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); + + /* 0x1010000 */ + dwWriteReg(portOffset+8, (pCmdInfo->_firstBeginAddr >> 4)); + XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n", pCmdInfo->_firstBeginAddr); + XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", (pCmdInfo->_firstBeginAddr >> 4)); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); + + /* Jong 06/13/2006 */ + xgi_dump_register(info); + + /* Jong 06/12/2006; system hang; marked for test */ + dwWriteReg(portOffset+12, 0); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); + + /* Jong 06/13/2006; remove marked for system hang test */ + /* xgi_waitfor_pci_idle(info); */ + } + else + { + XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); + U32 *lastBatchVirtAddr; + + /* Jong 05/25/2006 */ + /* return; */ + + if (pCmdInfo->_firstBeginType == BTYPE_3D) + { + addFlush2D(info); + } + + lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + + lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; + lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; + lastBatchVirtAddr[3] = 0; + //barrier(); + lastBatchVirtAddr[0] = (beginPort<<22) + (BEGIN_VALID_MASK) + (0xffff & pCmdInfo->_curDebugID); + + /* Jong 06/12/2006; system hang; marked for test */ + triggerHWCommandList(info, pCmdInfo->_beginCount); + + XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr 
!= 0 - End\n"); + } + + s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; + XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); +} + + +/* + state: 0 - console + 1 - graphic + 2 - fb + 3 - logout +*/ +void xgi_state_change(xgi_info_t *info, xgi_state_info_t *pStateInfo) +{ +#define STATE_CONSOLE 0 +#define STATE_GRAPHIC 1 +#define STATE_FBTERM 2 +#define STATE_LOGOUT 3 +#define STATE_REBOOT 4 +#define STATE_SHUTDOWN 5 + + if ((pStateInfo->_fromState == STATE_GRAPHIC) + && (pStateInfo->_toState == STATE_CONSOLE)) + { + XGI_INFO("[kd] I see, now is to leaveVT\n"); + // stop to received batch + } + else if ((pStateInfo->_fromState == STATE_CONSOLE) + && (pStateInfo->_toState == STATE_GRAPHIC)) + { + XGI_INFO("[kd] I see, now is to enterVT\n"); + xgi_cmdlist_reset(); + } + else if ((pStateInfo->_fromState == STATE_GRAPHIC) + && ( (pStateInfo->_toState == STATE_LOGOUT) + ||(pStateInfo->_toState == STATE_REBOOT) + ||(pStateInfo->_toState == STATE_SHUTDOWN))) + { + XGI_INFO("[kd] I see, not is to exit from X\n"); + // stop to received batch + } + else + { + XGI_ERROR("[kd] Should not happen\n"); + } + +} + +void xgi_cmdlist_reset(void) +{ + s_cmdring._lastBatchStartAddr = 0; + s_cmdring._cmdRingOffset = 0; +} + +void xgi_cmdlist_cleanup(xgi_info_t *info) +{ + if (s_cmdring._cmdRingBuffer != 0) + { + xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); + s_cmdring._cmdRingBuffer = 0; + s_cmdring._cmdRingOffset = 0; + s_cmdring._cmdRingSize = 0; + } +} + +static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter) +{ + static U32 s_triggerID = 1; + + //Fix me, currently we just trigger one time + while (triggerCounter--) + { + dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, + 0x05000000 + (0xffff & s_triggerID++)); + // xgi_waitfor_pci_idle(info); + } +} + +static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo) +{ + // Convert the batch type to begin port ID + switch(pCmdInfo->_firstBeginType) + { + case BTYPE_2D: + return 0x30; + case BTYPE_3D: + 
return 0x40; + case BTYPE_FLIP: + return 0x50; + case BTYPE_CTRL: + return 0x20; + default: + //ASSERT(0); + return 0xff; + } +} + +static void addFlush2D(xgi_info_t *info) +{ + U32 *flushBatchVirtAddr; + U32 flushBatchHWAddr; + + U32 *lastBatchVirtAddr; + + /* check buf is large enough to contain a new flush batch */ + if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) + { + s_cmdring._cmdRingOffset = 0; + } + + flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; + flushBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, flushBatchHWAddr); + + /* not using memcpy for I assume the address is discrete */ + *(flushBatchVirtAddr + 0) = 0x10000000; + *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */ + *(flushBatchVirtAddr + 2) = 0x00000000; + *(flushBatchVirtAddr + 3) = 0x00000000; + *(flushBatchVirtAddr + 4) = FLUSH_2D; + *(flushBatchVirtAddr + 5) = FLUSH_2D; + *(flushBatchVirtAddr + 6) = FLUSH_2D; + *(flushBatchVirtAddr + 7) = FLUSH_2D; + + // ASSERT(s_cmdring._lastBatchStartAddr != NULL); + lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + + lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; + lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; + lastBatchVirtAddr[3] = 0; + + //barrier(); + + // BTYPE_CTRL & NO debugID + lastBatchVirtAddr[0] = (0x20<<22) + (BEGIN_VALID_MASK); + + triggerHWCommandList(info, 1); + + s_cmdring._cmdRingOffset += 0x20; + s_cmdring._lastBatchStartAddr = flushBatchHWAddr; +} diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h new file mode 100644 index 00000000..1b0c4965 --- /dev/null +++ b/linux-core/xgi_cmdlist.h @@ -0,0 +1,79 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#ifndef _XGI_CMDLIST_H_ +#define _XGI_CMDLIST_H_ + +#define ONE_BIT_MASK 0x1 +#define TWENTY_BIT_MASK 0xfffff +#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) +#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK +#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) +#define BASE_3D_ENG 0x2800 +#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10 +#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) +#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) +#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) +#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) +#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 + +typedef enum +{ + FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK, + FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK, + FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK +}FLUSH_CODE; + +typedef enum +{ + AGPCMDLIST_SCRATCH_SIZE = 0x100, + AGPCMDLIST_BEGIN_SIZE = 0x004, + AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004, + AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c, + AGPCMDLIST_FLUSH_CMD_LEN = 0x004, + AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE +}CMD_SIZE; + +typedef struct xgi_cmdring_info_s +{ + U32 _cmdRingSize; + U32 _cmdRingBuffer; + U32 _cmdRingBusAddr; + U32 _lastBatchStartAddr; + U32 _cmdRingOffset; +}xgi_cmdring_info_t; + +extern int xgi_cmdlist_initialize(xgi_info_t *info, U32 size); + +extern void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t * pCmdInfo); + +extern void xgi_state_change(xgi_info_t *info, xgi_state_info_t * pStateInfo); + +extern void xgi_cmdlist_cleanup(xgi_info_t *info); + +#endif /* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c new file mode 100644 index 00000000..5e80d417 --- /dev/null +++ b/linux-core/xgi_drv.c @@ -0,0 +1,1610 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +/* for debug */ +static int xgi_temp = 1; +/* + * global parameters + */ +static struct xgi_dev { + u16 vendor; + u16 device; + const char *name; +} xgidev_list[] = { + {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, + {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, + {0, 0, NULL} +}; + +int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. 
*/ + +static int xgi_num_devices = 0; + +xgi_info_t xgi_devices[XGI_MAX_DEVICES]; + +#if defined(XGI_PM_SUPPORT_APM) +static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; +#endif + +/* add one for the control device */ +xgi_info_t xgi_ctl_device; +wait_queue_head_t xgi_ctl_waitqueue; + +#ifdef CONFIG_PROC_FS +struct proc_dir_entry *proc_xgi; +#endif + +#ifdef CONFIG_DEVFS_FS +devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; +#endif + +struct list_head xgi_mempid_list; + +/* xgi_ functions.. do not take a state device parameter */ +static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info); +static void xgi_proc_create(void); +static void xgi_proc_remove_all(struct proc_dir_entry *); +static void xgi_proc_remove(void); + +/* xgi_kern_ functions, interfaces used by linux kernel */ +int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *); + +unsigned int xgi_kern_poll(struct file *, poll_table *); +int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +int xgi_kern_mmap(struct file *, struct vm_area_struct *); +int xgi_kern_open(struct inode *, struct file *); +int xgi_kern_release(struct inode *inode, struct file *filp); + +void xgi_kern_vma_open(struct vm_area_struct *vma); +void xgi_kern_vma_release(struct vm_area_struct *vma); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type); +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int write_access); +#endif + +int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *); + +int xgi_kern_ctl_open(struct inode *, struct file *); +int xgi_kern_ctl_close(struct inode *, struct file *); +unsigned int 
xgi_kern_ctl_poll(struct file *, poll_table *); + +void xgi_kern_isr_bh(unsigned long); +irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); + +static void xgi_lock_init(xgi_info_t *info); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_kern_acpi_standby(struct pci_dev *, u32); +int xgi_kern_acpi_resume(struct pci_dev *); +#endif + +/* + * verify access to pci config space wasn't disabled behind our back + * unfortunately, XFree86 enables/disables memory access in pci config space at + * various times (such as restoring initial pci config space settings during vt + * switches or when doing mulicard). As a result, all of our register accesses + * are garbage at this point. add a check to see if access was disabled and + * reenable any such access. + */ +#define XGI_CHECK_PCI_CONFIG(xgi) \ + xgi_check_pci_config(xgi, __LINE__) + +static inline void xgi_check_pci_config(xgi_info_t *info, int line) +{ + unsigned short cmd, flag = 0; + + // don't do this on the control device, only the actual devices + if (info->flags & XGI_FLAG_CONTROL) + return; + + pci_read_config_word(info->dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) + { + XGI_INFO("restoring bus mastering! (%d)\n", line); + cmd |= PCI_COMMAND_MASTER; + flag = 1; + } + + if (!(cmd & PCI_COMMAND_MEMORY)) + { + XGI_INFO("restoring MEM access! 
(%d)\n", line); + cmd |= PCI_COMMAND_MEMORY; + flag = 1; + } + + if (flag) + pci_write_config_word(info->dev, PCI_COMMAND, cmd); +} + +static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info) +{ + return 1; +} + +/* + * struct pci_device_id { + * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID + * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID + * unsigned int class, class_mask; // (class,subclass,prog-if) triplet + * unsigned long driver_data; // Data private to the driver + * }; + */ + +static struct pci_device_id xgi_dev_table[] = { + { + .vendor = PCI_VENDOR_ID_XGI, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_VGA << 8), + .class_mask = ~0, + }, + { } +}; + +/* + * #define MODULE_DEVICE_TABLE(type,name) \ + * MODULE_GENERIC_TABLE(type##_device,name) + */ + MODULE_DEVICE_TABLE(pci, xgi_dev_table); + +/* + * struct pci_driver { + * struct list_head node; + * char *name; + * const struct pci_device_id *id_table; // NULL if wants all devices + * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted + * void (*remove)(struct pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver) + * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context + * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended + * int (*resume)(struct pci_dev *dev); // Device woken up + * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event + * }; + */ +static struct pci_driver xgi_pci_driver = { + .name = "xgi", + .id_table = xgi_dev_table, + .probe = xgi_kern_probe, +#if defined(XGI_SUPPORT_ACPI) + .suspend = xgi_kern_acpi_standby, + .resume = xgi_kern_acpi_resume, +#endif +}; + +/* + * find xgi devices and set initial state + */ +int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) +{ + xgi_info_t *info; + + if ((dev->vendor != PCI_VENDOR_ID_XGI) + || 
(dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) + { + return -1; + } + + if (xgi_num_devices == XGI_MAX_DEVICES) + { + XGI_INFO("maximum device number (%d) reached!\n", xgi_num_devices); + return -1; + } + + /* enable io, mem, and bus-mastering in pci config space */ + if (pci_enable_device(dev) != 0) + { + XGI_INFO("pci_enable_device failed, aborting\n"); + return -1; + } + + XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices); + + pci_set_master(dev); + + info = &xgi_devices[xgi_num_devices]; + info->dev = dev; + info->vendor_id = dev->vendor; + info->device_id = dev->device; + info->bus = dev->bus->number; + info->slot = PCI_SLOT((dev)->devfn); + + xgi_lock_init(info); + + info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1); + info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1); + + /* check IO region */ + if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) + { + XGI_ERROR("cannot reserve MMIO memory\n"); + goto error_disable_dev; + } + + XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); + XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); + + info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base, + info->mmio.size); + if (!info->mmio.vbase) + { + release_mem_region(info->mmio.base, info->mmio.size); + XGI_ERROR("info->mmio.vbase failed\n"); + goto error_disable_dev; + } + xgi_enable_mmio(info); + + //xgi_enable_ge(info); + + XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase); + + info->fb.base = XGI_PCI_RESOURCE_START(dev, 0); + info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0); + + XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base); + XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + + info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024; + XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + + /* check frame buffer region + if (!request_mem_region(info->fb.base, info->fb.size, "xgi")) + { + release_mem_region(info->mmio.base, info->mmio.size); + XGI_ERROR("cannot reserve frame buffer memory\n"); + goto 
error_disable_dev; + } + + + info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base, + info->fb.size); + + if (!info->fb.vbase) + { + release_mem_region(info->mmio.base, info->mmio.size); + release_mem_region(info->fb.base, info->fb.size); + XGI_ERROR("info->fb.vbase failed\n"); + goto error_disable_dev; + } + */ + info->fb.vbase = NULL; + XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); + + info->irq = dev->irq; + + /* check common error condition */ + if (info->irq == 0) + { + XGI_ERROR("Can't find an IRQ for your XGI card! \n"); + goto error_zero_dev; + } + XGI_INFO("info->irq: %lx \n", info->irq); + + //xgi_enable_dvi_interrupt(info); + + /* sanity check the IO apertures */ + if ((info->mmio.base == 0) || (info->mmio.size == 0) + || (info->fb.base == 0) || (info->fb.size == 0)) + { + XGI_ERROR("The IO regions for your XGI card are invalid.\n"); + + if ((info->mmio.base == 0) || (info->mmio.size == 0)) + { + XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n", + info->mmio.base, + info->mmio.size); + } + + if ((info->fb.base == 0) || (info->fb.size == 0)) + { + XGI_ERROR("frame buffer appears to be wrong: 0x%lx 0x%lx\n", + info->fb.base, + info->fb.size); + } + + goto error_zero_dev; + } + + //xgi_num_devices++; + + return 0; + +error_zero_dev: + release_mem_region(info->fb.base, info->fb.size); + release_mem_region(info->mmio.base, info->mmio.size); + +error_disable_dev: + pci_disable_device(dev); + return -1; + +} + +/* + * vma operations... + * this is only called when the vmas are duplicated. this + * appears to only happen when the process is cloned to create + * a new process, and not when the process is threaded. + * + * increment the usage count for the physical pages, so when + * this clone unmaps the mappings, the pages are not + * deallocated under the original process. 
+ */ +struct vm_operations_struct xgi_vm_ops = { + .open = xgi_kern_vma_open, + .close = xgi_kern_vma_release, + .nopage = xgi_kern_vma_nopage, +}; + +void xgi_kern_vma_open(struct vm_area_struct *vma) +{ + XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma)); + + if (XGI_VMA_PRIVATE(vma)) + { + xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); + XGI_ATOMIC_INC(block->use_count); + } +} + +void xgi_kern_vma_release(struct vm_area_struct *vma) +{ + XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma)); + + if (XGI_VMA_PRIVATE(vma)) + { + xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); + XGI_ATOMIC_DEC(block->use_count); + + /* + * if use_count is down to 0, the kernel virtual mapping was freed + * but the underlying physical pages were not, we need to clear the + * bit and free the physical pages. + */ + if (XGI_ATOMIC_READ(block->use_count) == 0) + { + // Need TO Finish + XGI_VMA_PRIVATE(vma) = NULL; + } + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); + struct page *page = NOPAGE_SIGBUS; + unsigned long offset = 0; + unsigned long page_addr = 0; +/* + XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma), + address); +*/ + offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + + offset = offset - block->bus_addr; + + offset >>= PAGE_SHIFT; + + page_addr = block->page_table[offset].virt_addr; + + if (xgi_temp) + { + XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" + "block->page_count: 0x%lx block->page_order: 0x%lx" + "block->page_table[0x%lx].virt_addr: 0x%lx\n", + block->bus_addr, block->hw_addr, + block->page_count, block->page_order, + offset, + 
block->page_table[offset].virt_addr); + xgi_temp = 0; + } + + if (!page_addr) goto out; /* hole or end-of-file */ + page = virt_to_page(page_addr); + + /* got it, now increment the count */ + get_page(page); +out: + return page; + +} +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int write_access) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); + struct page *page = NOPAGE_SIGBUS; + unsigned long offset = 0; + unsigned long page_addr = 0; +/* + XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma), + address); +*/ + offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + + offset = offset - block->bus_addr; + + offset >>= PAGE_SHIFT; + + page_addr = block->page_table[offset].virt_addr; + + if (xgi_temp) + { + XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" + "block->page_count: 0x%lx block->page_order: 0x%lx" + "block->page_table[0x%lx].virt_addr: 0x%lx\n", + block->bus_addr, block->hw_addr, + block->page_count, block->page_order, + offset, + block->page_table[offset].virt_addr); + xgi_temp = 0; + } + + if (!page_addr) goto out; /* hole or end-of-file */ + page = virt_to_page(page_addr); + + /* got it, now increment the count */ + get_page(page); +out: + return page; +} +#endif + +#if 0 +static struct file_operations xgi_fops = { + /* owner: THIS_MODULE, */ + poll: xgi_kern_poll, + ioctl: xgi_kern_ioctl, + mmap: xgi_kern_mmap, + open: xgi_kern_open, + release: xgi_kern_release, +}; +#endif + +static struct file_operations xgi_fops = { + .owner = THIS_MODULE, + .poll = xgi_kern_poll, + .ioctl = xgi_kern_ioctl, + .mmap = xgi_kern_mmap, + .open = xgi_kern_open, + .release = xgi_kern_release, +}; + +static xgi_file_private_t * xgi_alloc_file_private(void) +{ + xgi_file_private_t *fp; + + XGI_KMALLOC(fp, sizeof(xgi_file_private_t)); + if (!fp) + return NULL; + + memset(fp, 0, sizeof(xgi_file_private_t)); + + /* initialize 
this file's event queue */ + init_waitqueue_head(&fp->wait_queue); + + xgi_init_lock(fp->fp_lock); + + return fp; +} + +static void xgi_free_file_private(xgi_file_private_t *fp) +{ + if (fp == NULL) + return; + + XGI_KFREE(fp, sizeof(xgi_file_private_t)); +} + +int xgi_kern_open(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = NULL; + int dev_num; + int result = 0, status; + + /* + * the type and num values are only valid if we are not using devfs. + * However, since we use them to retrieve the device pointer, we + * don't need them with devfs as filp->private_data is already + * initialized + */ + filp->private_data = xgi_alloc_file_private(); + if (filp->private_data == NULL) + return -ENOMEM; + + XGI_INFO("filp->private_data %p\n", filp->private_data); + /* + * for control device, just jump to its open routine + * after setting up the private data + */ + if (XGI_IS_CONTROL_DEVICE(inode)) + return xgi_kern_ctl_open(inode, filp); + + /* what device are we talking about? */ + dev_num = XGI_DEVICE_NUMBER(inode); + if (dev_num >= XGI_MAX_DEVICES) + { + xgi_free_file_private(filp->private_data); + filp->private_data = NULL; + return -ENODEV; + } + + info = &xgi_devices[dev_num]; + + XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num); + + xgi_down(info->info_sem); + XGI_CHECK_PCI_CONFIG(info); + + XGI_INFO_FROM_FP(filp) = info; + + /* + * map the memory and allocate isr on first open + */ + + if (!(info->flags & XGI_FLAG_OPEN)) + { + XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); + + if (info->device_id == 0) + { + XGI_INFO("open of nonexistent device %d\n", dev_num); + result = -ENXIO; + goto failed; + } + + /* initialize struct irqaction */ + status = request_irq(info->irq, xgi_kern_isr, + SA_INTERRUPT | SA_SHIRQ, "xgi", + (void *) info); + if (status != 0) + { + if (info->irq && (status == -EBUSY)) + { + XGI_ERROR("Tried to get irq %d, but another driver", + (unsigned int) info->irq); + XGI_ERROR("has it and is not sharing it.\n"); + } + 
XGI_ERROR("isr request failed 0x%x\n", status); + result = -EIO; + goto failed; + } + + /* + * #define DECLARE_TASKLET(name, func, data) \ + * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } + */ + info->tasklet.func = xgi_kern_isr_bh; + info->tasklet.data = (unsigned long) info; + tasklet_enable(&info->tasklet); + + /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ + xgi_cmdlist_initialize(info, 0x100000); + + info->flags |= XGI_FLAG_OPEN; + } + + XGI_ATOMIC_INC(info->use_count); + +failed: + xgi_up(info->info_sem); + + if ((result) && filp->private_data) + { + xgi_free_file_private(filp->private_data); + filp->private_data = NULL; + } + + return result; +} + +int xgi_kern_release(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + + XGI_CHECK_PCI_CONFIG(info); + + /* + * for control device, just jump to its open routine + * after setting up the private data + */ + if (XGI_IS_CONTROL_DEVICE(inode)) + return xgi_kern_ctl_close(inode, filp); + + XGI_INFO("Jong-xgi_kern_release on device %d\n", XGI_DEVICE_NUMBER(inode)); + + xgi_down(info->info_sem); + if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) + { + + /* + * The usage count for this device has dropped to zero, it can be shut + * down safely; disable its interrupts. + */ + + /* + * Disable this device's tasklet to make sure that no bottom half will + * run with undefined device state. + */ + tasklet_disable(&info->tasklet); + + /* + * Free the IRQ, which may block until all pending interrupt processing + * has completed. 
+ */ + free_irq(info->irq, (void *)info); + + xgi_cmdlist_cleanup(info); + + /* leave INIT flag alone so we don't reinit every time */ + info->flags &= ~XGI_FLAG_OPEN; + } + + xgi_up(info->info_sem); + + if (FILE_PRIVATE(filp)) + { + xgi_free_file_private(FILE_PRIVATE(filp)); + FILE_PRIVATE(filp) = NULL; + } + + return 0; +} + +int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) +{ + //struct inode *inode = INODE_FROM_FP(filp); + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + xgi_pcie_block_t *block; + int pages = 0; + unsigned long prot; + + XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma)); + + XGI_CHECK_PCI_CONFIG(info); + + if (XGI_MASK_OFFSET(vma->vm_start) + || XGI_MASK_OFFSET(vma->vm_end)) + { + XGI_ERROR("VM: bad mmap range: %lx - %lx\n", + vma->vm_start, vma->vm_end); + return -ENXIO; + } + + pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + + vma->vm_ops = &xgi_vm_ops; + + /* XGI IO(reg) space */ + if (IS_IO_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) + { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + /* mark it as IO so that we don't dump it on core dump */ + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap io space \n"); + } + /* XGI fb space */ + /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */ + else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) + { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + // mark it as IO so that we don't dump it on core dump + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap fb space \n"); + } + /* PCIE allocator */ + /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */ + else if 
(IS_PCIE_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) + { + xgi_down(info->pcie_sem); + + block = (xgi_pcie_block_t *)xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma)); + + if (block == NULL) + { + XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); + xgi_up(info->pcie_sem); + return -EAGAIN; + } + + if (block->page_count != pages) + { + XGI_ERROR("pre-allocated PCIE memory has wrong number of pages!\n"); + xgi_up(info->pcie_sem); + return -EAGAIN; + } + + vma->vm_private_data = block; + XGI_ATOMIC_INC(block->use_count); + xgi_up(info->pcie_sem); + + /* + * prevent the swapper from swapping it out + * mark the memory i/o so the buffers aren't + * dumped on core dumps */ + vma->vm_flags |= (VM_LOCKED | VM_IO); + + /* un-cached */ + prot = pgprot_val(vma->vm_page_prot); + /* + if (boot_cpu_data.x86 > 3) + prot |= _PAGE_PCD | _PAGE_PWT; + */ + vma->vm_page_prot = __pgprot(prot); + + XGI_INFO("VM: mmap pcie space \n"); + } +#if 0 + else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) + { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + // mark it as IO so that we don't dump it on core dump + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap fb space \n"); + } +#endif + else + { + vma->vm_flags |= (VM_IO | VM_LOCKED); + XGI_ERROR("VM: mmap wrong range \n"); + } + + vma->vm_file = filp; + + return 0; +} + +unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) +{ + xgi_file_private_t *fp; + xgi_info_t *info; + unsigned int mask = 0; + unsigned long eflags; + + info = XGI_INFO_FROM_FP(filp); + + if (info->device_number == XGI_CONTROL_DEVICE_NUMBER) + return xgi_kern_ctl_poll(filp, wait); + + fp = XGI_GET_FP(filp); + + if (!(filp->f_flags & O_NONBLOCK)) + { + /* add us to the list */ + poll_wait(filp, &fp->wait_queue, wait); + } + + xgi_lock_irqsave(fp->fp_lock, 
eflags); + + /* wake the user on any event */ + if (fp->num_events) + { + XGI_INFO("Hey, an event occured!\n"); + /* + * trigger the client, when they grab the event, + * we'll decrement the event count + */ + mask |= (POLLPRI|POLLIN); + } + xgi_unlock_irqsave(fp->fp_lock, eflags); + + return mask; +} + +int xgi_kern_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + xgi_info_t *info; + xgi_mem_alloc_t *alloc = NULL; + + int status = 0; + void *arg_copy; + int arg_size; + int err = 0; + + info = XGI_INFO_FROM_FP(filp); + + XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), _IOC_NR(cmd), arg, _IOC_SIZE(cmd)); + /* + * extract the type and number bitfields, and don't decode + * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() + */ + if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) return -ENOTTY; + if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) return -ENOTTY; + + /* + * the direction is a bitmask, and VERIFY_WRITE catches R/W + * transfers. `Type' is user-oriented, while + * access_ok is kernel-oriented, so the concept of "read" and + * "write" is reversed + */ + if (_IOC_DIR(cmd) & _IOC_READ) + { + err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); + } + else if (_IOC_DIR(cmd) & _IOC_WRITE) + { + err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); + } + if (err) return -EFAULT; + + XGI_CHECK_PCI_CONFIG(info); + + arg_size = _IOC_SIZE(cmd); + XGI_KMALLOC(arg_copy, arg_size); + if (arg_copy == NULL) + { + XGI_ERROR("failed to allocate ioctl memory\n"); + return -ENOMEM; + } + + /* Jong 05/25/2006 */ + /* copy_from_user(arg_copy, (void *)arg, arg_size); */ + if(copy_from_user(arg_copy, (void *)arg, arg_size)) + { + XGI_ERROR("failed to copyin ioctl data\n"); + XGI_INFO("Jong-copy_from_user-fail! \n"); + } + else + XGI_INFO("Jong-copy_from_user-OK! 
\n"); + + alloc = (xgi_mem_alloc_t *)arg_copy; + XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, arg_size); + + switch (_IOC_NR(cmd)) + { + case XGI_ESC_DEVICE_INFO: + XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); + xgi_get_device_info(info, (struct xgi_chip_info_s *) arg_copy); + break; + case XGI_ESC_POST_VBIOS: + XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); + break; + case XGI_ESC_FB_ALLOC: + XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); + xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc); + break; + case XGI_ESC_FB_FREE: + XGI_INFO("Jong-xgi_ioctl_fb_free \n"); + xgi_fb_free(info, *(unsigned long *) arg_copy); + break; + case XGI_ESC_MEM_COLLECT: + XGI_INFO("Jong-xgi_ioctl_mem_collect \n"); + xgi_mem_collect(info, (unsigned int *) arg_copy); + break; + case XGI_ESC_PCIE_ALLOC: + XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); + xgi_pcie_alloc(info, ((xgi_mem_req_t *)arg_copy)->size, + ((xgi_mem_req_t *)arg_copy)->owner, alloc); + break; + case XGI_ESC_PCIE_FREE: + XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", *((unsigned long *) arg_copy)); + xgi_pcie_free(info, *((unsigned long *) arg_copy)); + break; + case XGI_ESC_PCIE_CHECK: + XGI_INFO("Jong-xgi_pcie_heap_check \n"); + xgi_pcie_heap_check(); + break; + case XGI_ESC_GET_SCREEN_INFO: + XGI_INFO("Jong-xgi_get_screen_info \n"); + xgi_get_screen_info(info, (struct xgi_screen_info_s *) arg_copy); + break; + case XGI_ESC_PUT_SCREEN_INFO: + XGI_INFO("Jong-xgi_put_screen_info \n"); + xgi_put_screen_info(info, (struct xgi_screen_info_s *) arg_copy); + break; + case XGI_ESC_MMIO_INFO: + XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); + xgi_get_mmio_info(info, (struct xgi_mmio_info_s *) arg_copy); + break; + case XGI_ESC_GE_RESET: + XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); + xgi_ge_reset(info); + break; + case XGI_ESC_SAREA_INFO: + XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); + xgi_sarea_info(info, (struct xgi_sarea_info_s *) arg_copy); + break; + case XGI_ESC_DUMP_REGISTER: + 
XGI_INFO("Jong-xgi_ioctl_dump_register \n"); + xgi_dump_register(info); + break; + case XGI_ESC_DEBUG_INFO: + XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); + xgi_restore_registers(info); + //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + break; + case XGI_ESC_SUBMIT_CMDLIST: + XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); + xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy); + break; + case XGI_ESC_TEST_RWINKERNEL: + XGI_INFO("Jong-xgi_test_rwinkernel \n"); + xgi_test_rwinkernel(info, *(unsigned long*) arg_copy); + break; + case XGI_ESC_STATE_CHANGE: + XGI_INFO("Jong-xgi_state_change \n"); + xgi_state_change(info, (xgi_state_info_t *) arg_copy); + break; + case XGI_ESC_CPUID: + XGI_INFO("Jong-XGI_ESC_CPUID \n"); + xgi_get_cpu_id((struct cpu_info_s*) arg_copy); + break; + default: + XGI_INFO("Jong-xgi_ioctl_default \n"); + status = -EINVAL; + break; + } + + if (copy_to_user((void *)arg, arg_copy, arg_size)) + { + XGI_ERROR("failed to copyout ioctl data\n"); + XGI_INFO("Jong-copy_to_user-fail! \n"); + } + else + XGI_INFO("Jong-copy_to_user-OK! 
\n"); + + XGI_KFREE(arg_copy, arg_size); + return status; +} + + +/* + * xgi control driver operations defined here + */ +int xgi_kern_ctl_open(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = &xgi_ctl_device; + + int rc = 0; + + XGI_INFO("Jong-xgi_kern_ctl_open\n"); + + xgi_down(info->info_sem); + info->device_number = XGI_CONTROL_DEVICE_NUMBER; + + /* save the xgi info in file->private_data */ + filp->private_data = info; + + if (XGI_ATOMIC_READ(info->use_count) == 0) + { + init_waitqueue_head(&xgi_ctl_waitqueue); + } + + info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL; + + XGI_ATOMIC_INC(info->use_count); + xgi_up(info->info_sem); + + return rc; +} + +int xgi_kern_ctl_close(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + + XGI_INFO("Jong-xgi_kern_ctl_close\n"); + + xgi_down(info->info_sem); + if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) + { + info->flags = 0; + } + xgi_up(info->info_sem); + + if (FILE_PRIVATE(filp)) + { + xgi_free_file_private(FILE_PRIVATE(filp)); + FILE_PRIVATE(filp) = NULL; + } + + return 0; +} + +unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table *wait) +{ + //xgi_info_t *info = XGI_INFO_FROM_FP(filp);; + unsigned int ret = 0; + + if (!(filp->f_flags & O_NONBLOCK)) + { + poll_wait(filp, &xgi_ctl_waitqueue, wait); + } + + return ret; +} + +/* + * xgi proc system + */ +static u8 xgi_find_pcie_capability(struct pci_dev *dev) +{ + u16 status; + u8 cap_ptr, cap_id; + + pci_read_config_word(dev, PCI_STATUS, &status); + status &= PCI_STATUS_CAP_LIST; + if (!status) + return 0; + + switch (dev->hdr_type) + { + case PCI_HEADER_TYPE_NORMAL: + case PCI_HEADER_TYPE_BRIDGE: + pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr); + break; + default: + return 0; + } + + do + { + cap_ptr &= 0xFC; + pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); + pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr); + } while (cap_ptr && cap_id != 0xFF); + + return 0; +} + 
+static struct pci_dev* xgi_get_pci_device(xgi_info_t *info) +{ + struct pci_dev *dev; + + dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL); + while (dev) + { + if (XGI_PCI_SLOT_NUMBER(dev) == info->slot + && XGI_PCI_BUS_NUMBER(dev) == info->bus) + return dev; + dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev); + } + + return NULL; +} + +int xgi_kern_read_card_info(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct pci_dev *dev; + char *type; + int len = 0; + + xgi_info_t *info; + info = (xgi_info_t *) data; + + dev = xgi_get_pci_device(info); + if (!dev) + return 0; + + type = xgi_find_pcie_capability(dev) ? "PCIE" : "PCI"; + len += sprintf(page+len, "Card Type: \t %s\n", type); + + XGI_PCI_DEV_PUT(dev); + return len; +} + +int xgi_kern_read_version(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = 0; + + len += sprintf(page+len, "XGI version: %s\n", "1.0"); + len += sprintf(page+len, "GCC version: %s\n", "3.0"); + + return len; +} + +int xgi_kern_read_pcie_info(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return 0; +} + +int xgi_kern_read_status(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return 0; +} + + +static void xgi_proc_create(void) +{ +#ifdef CONFIG_PROC_FS + + struct pci_dev *dev; + int i = 0; + char name[6]; + + struct proc_dir_entry *entry; + struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; + + xgi_info_t *info; + xgi_info_t *xgi_max_devices; + + /* world readable directory */ + int flags = S_IFDIR | S_IRUGO | S_IXUGO; + + proc_xgi = create_proc_entry("xgi", flags, proc_root_driver); + if (!proc_xgi) + goto failed; + + proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi); + if (!proc_xgi_cards) + goto failed; + + proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi); + if (!proc_xgi_pcie) + goto failed; + + /* + * Set the module owner to ensure that the reference + * 
count reflects accesses to the proc files. + */ + proc_xgi->owner = THIS_MODULE; + proc_xgi_cards->owner = THIS_MODULE; + proc_xgi_pcie->owner = THIS_MODULE; + + xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; + for (info = xgi_devices; info < xgi_max_devices; info++) + { + if (info->device_id == 0) + break; + + /* world readable file */ + flags = S_IFREG | S_IRUGO; + + dev = xgi_get_pci_device(info); + if (!dev) + break; + + sprintf(name, "%d", i++); + entry = create_proc_entry(name, flags, proc_xgi_cards); + if (!entry) + { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_card_info; + entry->owner = THIS_MODULE; + + if (xgi_find_pcie_capability(dev)) + { + entry = create_proc_entry("status", flags, proc_xgi_pcie); + if (!entry) + { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_status; + entry->owner = THIS_MODULE; + + entry = create_proc_entry("card", flags, proc_xgi_pcie); + if (!entry) + { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_pcie_info; + entry->owner = THIS_MODULE; + } + + XGI_PCI_DEV_PUT(dev); + } + + entry = create_proc_entry("version", flags, proc_xgi); + if (!entry) + goto failed; + + entry->read_proc = xgi_kern_read_version; + entry->owner = THIS_MODULE; + + entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie); + if (!entry) + goto failed; + + entry->data = NULL; + entry->read_proc = xgi_kern_read_pcie_info; + entry->owner = THIS_MODULE; + + return; + +failed: + XGI_ERROR("failed to create /proc entries!\n"); + xgi_proc_remove_all(proc_xgi); +#endif +} + +#ifdef CONFIG_PROC_FS +static void xgi_proc_remove_all(struct proc_dir_entry *entry) +{ + while (entry) + { + struct proc_dir_entry *next = entry->next; + if (entry->subdir) + xgi_proc_remove_all(entry->subdir); + remove_proc_entry(entry->name, entry->parent); + if (entry == proc_xgi) + break; + entry = next; + } +} 
+#endif + +static void xgi_proc_remove(void) +{ +#ifdef CONFIG_PROC_FS + xgi_proc_remove_all(proc_xgi); +#endif +} + +/* + * driver receives an interrupt if someone waiting, then hand it off. + */ +irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) +{ + xgi_info_t *info = (xgi_info_t *) dev_id; + u32 need_to_run_bottom_half = 0; + + //XGI_INFO("xgi_kern_isr \n"); + + //XGI_CHECK_PCI_CONFIG(info); + + //xgi_dvi_irq_handler(info); + + if (need_to_run_bottom_half) + { + tasklet_schedule(&info->tasklet); + } + + return IRQ_HANDLED; +} + +void xgi_kern_isr_bh(unsigned long data) +{ + xgi_info_t *info = (xgi_info_t *) data; + + XGI_INFO("xgi_kern_isr_bh \n"); + + //xgi_dvi_irq_handler(info); + + XGI_CHECK_PCI_CONFIG(info); +} + +static void xgi_lock_init(xgi_info_t *info) +{ + if (info == NULL) return; + + spin_lock_init(&info->info_lock); + + sema_init(&info->info_sem, 1); + sema_init(&info->fb_sem, 1); + sema_init(&info->pcie_sem, 1); + + XGI_ATOMIC_SET(info->use_count, 0); +} + +static void xgi_dev_init(xgi_info_t *info) +{ + struct pci_dev *pdev = NULL; + struct xgi_dev *dev; + int found = 0; + u16 pci_cmd; + + XGI_INFO("Enter xgi_dev_init \n"); + + //XGI_PCI_FOR_EACH_DEV(pdev) + { + for (dev = xgidev_list; dev->vendor; dev++) + { + if ((dev->vendor == pdev->vendor) && (dev->device == pdev->device)) + { + XGI_INFO("dev->vendor = pdev->vendor= %x \n", dev->vendor); + XGI_INFO("dev->device = pdev->device= %x \n", dev->device); + + xgi_devices[found].device_id = pdev->device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &xgi_devices[found].revision_id); + + XGI_INFO("PCI_REVISION_ID= %x \n", xgi_devices[found].revision_id); + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + + XGI_INFO("PCI_COMMAND = %x \n", pci_cmd); + + break; + } + } + } +} +/* + * Export to Linux Kernel + */ + +static int __init xgi_init_module(void) +{ + xgi_info_t *info = &xgi_devices[xgi_num_devices]; + int i, result; + + XGI_INFO("Jong-xgi kernel driver %s 
initializing\n", XGI_DRV_VERSION); + //SET_MODULE_OWNER(&xgi_fops); + + memset(xgi_devices, 0, sizeof(xgi_devices)); + + if (pci_register_driver(&xgi_pci_driver) < 0) + { + pci_unregister_driver(&xgi_pci_driver); + XGI_ERROR("no XGI graphics adapter found\n"); + return -ENODEV; + } + + XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.base); + XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.size); + +/* Jong 07/27/2006; test for ubuntu */ +/* +#ifdef CONFIG_DEVFS_FS + + XGI_INFO("Jong-Use devfs \n"); + do + { + xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0); + if (xgi_devfs_handles[0] == NULL) + { + result = -ENOMEM; + XGI_ERROR("devfs register failed\n"); + goto failed; + } + } while(0); +#else */ /* no devfs, do it the "classic" way */ + + + XGI_INFO("Jong-Use non-devfs \n"); + /* + * Register your major, and accept a dynamic number. This is the + * first thing to do, in order to avoid releasing other module's + * fops in scull_cleanup_module() + */ + result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops); + if (result < 0) + { + XGI_ERROR("register chrdev failed\n"); + pci_unregister_driver(&xgi_pci_driver); + return result; + } + if (xgi_major == 0) xgi_major = result; /* dynamic */ + +/* #endif */ /* CONFIG_DEVFS_FS */ + + XGI_INFO("Jong-major number %d\n", xgi_major); + + /* instantiate tasklets */ + for (i = 0; i < XGI_MAX_DEVICES; i++) + { + /* + * We keep one tasklet per card to avoid latency issues with more + * than one device; no two instances of a single tasklet are ever + * executed concurrently. 
+ */ + XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1); + } + + /* init the xgi control device */ + { + xgi_info_t *info_ctl = &xgi_ctl_device; + xgi_lock_init(info_ctl); + } + + /* Init the resource manager */ + INIT_LIST_HEAD(&xgi_mempid_list); + if (!xgi_fb_heap_init(info)) + { + XGI_ERROR("xgi_fb_heap_init() failed\n"); + result = -EIO; + goto failed; + } + + /* Init the resource manager */ + if (!xgi_pcie_heap_init(info)) + { + XGI_ERROR("xgi_pcie_heap_init() failed\n"); + result = -EIO; + goto failed; + } + + /* create /proc/driver/xgi */ + xgi_proc_create(); + +#if defined(DEBUG) + inter_module_register("xgi_devices", THIS_MODULE, xgi_devices); +#endif + + return 0; + +failed: +#ifdef CONFIG_DEVFS_FS + XGI_DEVFS_REMOVE_CONTROL(); + XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + + if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) + XGI_ERROR("unregister xgi chrdev failed\n"); + + for (i = 0; i < xgi_num_devices; i++) + { + if (xgi_devices[i].dev) + { + release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); + release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size); + } + } + + pci_unregister_driver(&xgi_pci_driver); + return result; + + return 1; +} + +void __exit xgi_exit_module(void) +{ + int i; + xgi_info_t *info, *max_devices; + +#ifdef CONFIG_DEVFS_FS + /* + XGI_DEVFS_REMOVE_CONTROL(); + for (i = 0; i < XGI_MAX_DEVICES; i++) + XGI_DEVFS_REMOVE_DEVICE(i); + */ + XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + + if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) + XGI_ERROR("unregister xgi chrdev failed\n"); + + XGI_INFO("Jong-unregister xgi chrdev scceeded\n"); + for (i = 0; i < XGI_MAX_DEVICES; i++) + { + if (xgi_devices[i].dev) + { + /* clean up the flush2D batch array */ + xgi_cmdlist_cleanup(&xgi_devices[i]); + + if(xgi_devices[i].fb.vbase != NULL) + { + iounmap((void *)xgi_devices[i].fb.vbase); + xgi_devices[i].fb.vbase = NULL; + } + if(xgi_devices[i].mmio.vbase != NULL) + { + iounmap((void 
*)xgi_devices[i].mmio.vbase); + xgi_devices[i].mmio.vbase = NULL; + } + + //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); + //XGI_INFO("release frame buffer mem region scceeded\n"); + + release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size); + XGI_INFO("release MMIO mem region scceeded\n"); + + xgi_fb_heap_cleanup(&xgi_devices[i]); + XGI_INFO("xgi_fb_heap_cleanup scceeded\n"); + + xgi_pcie_heap_cleanup(&xgi_devices[i]); + XGI_INFO("xgi_pcie_heap_cleanup scceeded\n"); + + XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev); + } + } + + pci_unregister_driver(&xgi_pci_driver); + + /* remove /proc/driver/xgi */ + xgi_proc_remove(); + +#if defined(DEBUG) + inter_module_unregister("xgi_devices"); +#endif +} + +module_init(xgi_init_module); +module_exit(xgi_exit_module); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_acpi_event(struct pci_dev *dev, u32 state) +{ + return 1; +} + +int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state) +{ + return 1; +} + +int xgi_kern_acpi_resume(struct pci_dev *dev) +{ + return 1; +} +#endif + +MODULE_AUTHOR("Andrea Zhang "); +MODULE_DESCRIPTION("xgi kernel driver for xgi cards"); +MODULE_LICENSE("GPL"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h new file mode 100644 index 00000000..568a7af1 --- /dev/null +++ b/linux-core/xgi_drv.h @@ -0,0 +1,364 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_DRV_H_ +#define _XGI_DRV_H_ + +#define XGI_MAJOR_VERSION 0 +#define XGI_MINOR_VERSION 7 +#define XGI_PATCHLEVEL 5 + +#define XGI_DRV_VERSION "0.7.5" + +#ifndef XGI_DRV_NAME +#define XGI_DRV_NAME "xgi" +#endif + +/* + * xgi reserved major device number, Set this to 0 to + * request dynamic major number allocation. 
+ */ +#ifndef XGI_DEV_MAJOR +#define XGI_DEV_MAJOR 0 +#endif + +#ifndef XGI_MAX_DEVICES +#define XGI_MAX_DEVICES 1 +#endif + +/* Jong 06/06/2006 */ +/* #define XGI_DEBUG */ + +#ifndef PCI_VENDOR_ID_XGI +/* +#define PCI_VENDOR_ID_XGI 0x1023 +*/ +#define PCI_VENDOR_ID_XGI 0x18CA + +#endif + +#ifndef PCI_DEVICE_ID_XP5 +#define PCI_DEVICE_ID_XP5 0x2200 +#endif + +#ifndef PCI_DEVICE_ID_XG47 +#define PCI_DEVICE_ID_XG47 0x0047 +#endif + +/* Macros to make printk easier */ +#define XGI_ERROR(fmt, arg...) \ + printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +#define XGI_MEM_ERROR(area, fmt, arg...) \ + printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +/* #define XGI_DEBUG */ + +#ifdef XGI_DEBUG +#define XGI_INFO(fmt, arg...) \ + printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) +/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */ +#else +#define XGI_INFO(fmt, arg...) do { } while (0) +#endif + +/* device name length; must be atleast 8 */ +#define XGI_DEVICE_NAME_LENGTH 40 + +/* need a fake device number for control device; just to flag it for msgs */ +#define XGI_CONTROL_DEVICE_NUMBER 100 + +typedef struct { + U32 base; // pcie base is different from fb base + U32 size; + U8 *vbase; +} xgi_aperture_t; + +typedef struct xgi_screen_info_s { + U32 scrn_start; + U32 scrn_xres; + U32 scrn_yres; + U32 scrn_bpp; + U32 scrn_pitch; +} xgi_screen_info_t; + +typedef struct xgi_sarea_info_s { + U32 bus_addr; + U32 size; +} xgi_sarea_info_t; + +typedef struct xgi_info_s { + struct pci_dev *dev; + int flags; + int device_number; + int bus; /* PCI config info */ + int slot; + int vendor_id; + U32 device_id; + U8 revision_id; + + /* physical characteristics */ + xgi_aperture_t mmio; + xgi_aperture_t fb; + xgi_aperture_t pcie; + xgi_screen_info_t scrn_info; + xgi_sarea_info_t sarea_info; + + /* look up table parameters */ + U32 *lut_base; + U32 lutPageSize; + U32 lutPageOrder; + U32 isLUTInLFB; 
+ U32 sdfbPageSize; + + U32 pcie_config; + U32 pcie_status; + U32 irq; + + atomic_t use_count; + + /* keep track of any pending bottom halfes */ + struct tasklet_struct tasklet; + + spinlock_t info_lock; + + struct semaphore info_sem; + struct semaphore fb_sem; + struct semaphore pcie_sem; +} xgi_info_t; + +typedef struct xgi_ioctl_post_vbios { + U32 bus; + U32 slot; +} xgi_ioctl_post_vbios_t; + +typedef enum xgi_mem_location_s +{ + NON_LOCAL = 0, + LOCAL = 1, + INVALID = 0x7fffffff +} xgi_mem_location_t; + +enum PcieOwner +{ + PCIE_2D = 0, + /* + PCIE_3D should not begin with 1, + 2D alloc pcie memory will use owner 1. + */ + PCIE_3D = 11,/*vetex buf*/ + PCIE_3D_CMDLIST = 12, + PCIE_3D_SCRATCHPAD = 13, + PCIE_3D_TEXTURE = 14, + PCIE_INVALID = 0x7fffffff +}; + +typedef struct xgi_mem_req_s { + xgi_mem_location_t location; + unsigned long size; + unsigned long is_front; + enum PcieOwner owner; + unsigned long pid; +} xgi_mem_req_t; + +typedef struct xgi_mem_alloc_s { + xgi_mem_location_t location; + unsigned long size; + unsigned long bus_addr; + unsigned long hw_addr; + unsigned long pid; +} xgi_mem_alloc_t; + +typedef struct xgi_chip_info_s { + U32 device_id; + char device_name[32]; + U32 vendor_id; + U32 curr_display_mode; //Singe, DualView(Contained), MHS + U32 fb_size; + U32 sarea_bus_addr; + U32 sarea_size; +} xgi_chip_info_t; + +typedef struct xgi_opengl_cmd_s { + U32 cmd; +} xgi_opengl_cmd_t; + +typedef struct xgi_mmio_info_s { + xgi_opengl_cmd_t cmd_head; + void *mmioBase; + int size; +} xgi_mmio_info_t; + +typedef enum { + BTYPE_2D = 0, + BTYPE_3D = 1, + BTYPE_FLIP = 2, + BTYPE_CTRL = 3, + BTYPE_NONE = 0x7fffffff +}BATCH_TYPE; + +typedef struct xgi_cmd_info_s { + BATCH_TYPE _firstBeginType; + U32 _firstBeginAddr; + U32 _firstSize; + U32 _curDebugID; + U32 _lastBeginAddr; + U32 _beginCount; +} xgi_cmd_info_t; + +typedef struct xgi_state_info_s { + U32 _fromState; + U32 _toState; +} xgi_state_info_t; + +typedef struct cpu_info_s { + U32 _eax; + U32 _ebx; + 
U32 _ecx; + U32 _edx; +} cpu_info_t; + +typedef struct xgi_mem_pid_s { + struct list_head list; + xgi_mem_location_t location; + unsigned long bus_addr; + unsigned long pid; +} xgi_mem_pid_t; + +/* + * Ioctl definitions + */ + +#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ + +#define XGI_IOCTL_BASE 0 +#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) +#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) + +#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) +#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) +#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) +#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) +#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) +#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) +#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) +#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) +#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) +#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) +#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) +#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) +#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) +#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) +#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) +#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) +#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) +#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) +#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) + +#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t) +#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) + +#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) + +#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned 
long) + +#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t) +#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t) + +#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) +#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t) +#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) +#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) +#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t) + +#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t) +#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) +#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t) + +#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) +#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t) +#define XGI_IOCTL_MAXNR 30 + +/* + * flags + */ +#define XGI_FLAG_OPEN 0x0001 +#define XGI_FLAG_NEEDS_POSTING 0x0002 +#define XGI_FLAG_WAS_POSTED 0x0004 +#define XGI_FLAG_CONTROL 0x0010 +#define XGI_FLAG_MAP_REGS_EARLY 0x0200 + +/* mmap(2) offsets */ + +#define IS_IO_OFFSET(info, offset, length) \ + (((offset) >= (info)->mmio.base) \ + && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size)) + +/* Jong 06/14/2006 */ +/* (info)->fb.base is a base address for physical (bus) address space */ +/* what's the definition of offest? 
on physical (bus) address space or HW address space */ +/* Jong 06/15/2006; use HW address space */ +#define IS_FB_OFFSET(info, offset, length) \ + (((offset) >= 0) \ + && (((offset) + (length)) <= (info)->fb.size)) +#if 0 +#define IS_FB_OFFSET(info, offset, length) \ + (((offset) >= (info)->fb.base) \ + && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size)) +#endif + +#define IS_PCIE_OFFSET(info, offset, length) \ + (((offset) >= (info)->pcie.base) \ + && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) + +extern int xgi_fb_heap_init(xgi_info_t *info); +extern void xgi_fb_heap_cleanup(xgi_info_t *info); + +extern void xgi_fb_alloc(xgi_info_t *info, xgi_mem_req_t *req, xgi_mem_alloc_t *alloc); +extern void xgi_fb_free(xgi_info_t *info, unsigned long offset); +extern void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt); + +extern int xgi_pcie_heap_init(xgi_info_t *info); +extern void xgi_pcie_heap_cleanup(xgi_info_t *info); + +extern void xgi_pcie_alloc(xgi_info_t *info, unsigned long size, enum PcieOwner owner, xgi_mem_alloc_t *alloc); +extern void xgi_pcie_free(xgi_info_t *info, unsigned long offset); +extern void xgi_pcie_heap_check(void); +extern void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address); +extern void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address); + +extern void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req); +extern void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req); + +extern void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address); + +#endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c new file mode 100644 index 00000000..67fdfe17 --- /dev/null +++ b/linux-core/xgi_fb.c @@ -0,0 +1,528 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_fb.h" + +#define XGI_FB_HEAP_START 0x1000000 + +static xgi_mem_heap_t *xgi_fb_heap; +static kmem_cache_t *xgi_fb_cache_block = NULL; +extern struct list_head xgi_mempid_list; + +static xgi_mem_block_t *xgi_mem_new_node(void); +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long size); +static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset); + +void xgi_fb_alloc(xgi_info_t *info, + xgi_mem_req_t *req, + xgi_mem_alloc_t *alloc) +{ + xgi_mem_block_t *block; + xgi_mem_pid_t *mempid_block; + + if (req->is_front) + { + alloc->location = LOCAL; + alloc->bus_addr = info->fb.base; + alloc->hw_addr = 0; + XGI_INFO("Video RAM allocation on front buffer successfully! \n"); + } + else + { + xgi_down(info->fb_sem); + block = xgi_mem_alloc(info, req->size); + xgi_up(info->fb_sem); + + if (block == NULL) + { + alloc->location = LOCAL; + alloc->size = 0; + alloc->bus_addr = 0; + alloc->hw_addr = 0; + XGI_ERROR("Video RAM allocation failed\n"); + } + else + { + XGI_INFO("Video RAM allocation succeeded: 0x%p\n", + (char *) block->offset); + alloc->location = LOCAL; + alloc->size = block->size; + alloc->bus_addr = info->fb.base + block->offset; + alloc->hw_addr = block->offset; + + /* manage mempid */ + mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + mempid_block->location = LOCAL; + mempid_block->bus_addr = alloc->bus_addr; + mempid_block->pid = alloc->pid; + + if (!mempid_block) + XGI_ERROR("mempid_block alloc failed\n"); + + XGI_INFO("Memory ProcessID add one fb block pid:%ld successfully! 
\n", mempid_block->pid); + list_add(&mempid_block->list, &xgi_mempid_list); + } + } +} + +void xgi_fb_free(xgi_info_t *info, unsigned long bus_addr) +{ + xgi_mem_block_t *block; + unsigned long offset = bus_addr - info->fb.base; + xgi_mem_pid_t *mempid_block; + xgi_mem_pid_t *mempid_freeblock = NULL; + struct list_head *mempid_list; + + if (offset < 0) + { + XGI_INFO("free onscreen frame buffer successfully !\n"); + } + else + { + xgi_down(info->fb_sem); + block = xgi_mem_free(info, offset); + xgi_up(info->fb_sem); + + if (block == NULL) + { + XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", offset); + } + + /* manage mempid */ + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) + { + mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr) + { + mempid_freeblock = mempid_block; + break; + } + mempid_list = mempid_list->next; + } + if (mempid_freeblock) + { + list_del(&mempid_freeblock->list); + XGI_INFO("Memory ProcessID delete one fb block pid:%ld successfully! 
\n", mempid_freeblock->pid); + kfree(mempid_freeblock); + } + } +} + +int xgi_fb_heap_init(xgi_info_t *info) +{ + xgi_mem_block_t *block; + + xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL); + if (!xgi_fb_heap) + { + XGI_ERROR("xgi_fb_heap alloc failed\n"); + return 0; + } + + INIT_LIST_HEAD(&xgi_fb_heap->free_list); + INIT_LIST_HEAD(&xgi_fb_heap->used_list); + INIT_LIST_HEAD(&xgi_fb_heap->sort_list); + + xgi_fb_cache_block = kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), + 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + + if (NULL == xgi_fb_cache_block) + { + XGI_ERROR("Fail to creat xgi_fb_block\n"); + goto fail1; + } + + block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); + if (!block) + { + XGI_ERROR("kmem_cache_alloc failed\n"); + goto fail2; + } + + block->offset = XGI_FB_HEAP_START; + block->size = info->fb.size - XGI_FB_HEAP_START; + + list_add(&block->list, &xgi_fb_heap->free_list); + + xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START; + + XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, block->size); + XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); + + return 1; + +fail2: + if (xgi_fb_cache_block) + { + kmem_cache_destroy(xgi_fb_cache_block); + xgi_fb_cache_block = NULL; + } +fail1: + if(xgi_fb_heap) + { + kfree(xgi_fb_heap); + xgi_fb_heap = NULL; + } + return 0; +} + +void xgi_fb_heap_cleanup(xgi_info_t *info) +{ + struct list_head *free_list, *temp; + xgi_mem_block_t *block; + int i; + + if (xgi_fb_heap) + { + free_list = &xgi_fb_heap->free_list; + for (i = 0; i < 3; i++, free_list++) + { + temp = free_list->next; + while (temp != free_list) + { + block = list_entry(temp, struct xgi_mem_block_s, list); + temp = temp->next; + + XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + i, block->offset, block->size); + //XGI_INFO("No. 
%d free block: 0x%p \n", i, block); + kmem_cache_free(xgi_fb_cache_block, block); + block = NULL; + } + } + XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap); + kfree(xgi_fb_heap); + xgi_fb_heap = NULL; + } + + if (xgi_fb_cache_block) + { + kmem_cache_destroy(xgi_fb_cache_block); + xgi_fb_cache_block = NULL; + } +} + +static xgi_mem_block_t * xgi_mem_new_node(void) +{ + xgi_mem_block_t *block; + + block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); + if (!block) + { + XGI_ERROR("kmem_cache_alloc failed\n"); + return NULL; + } + + return block; +} + +#if 0 +static void xgi_mem_insert_node_after(xgi_mem_list_t *list, + xgi_mem_block_t *current, + xgi_mem_block_t *block); +static void xgi_mem_insert_node_before(xgi_mem_list_t *list, + xgi_mem_block_t *current, + xgi_mem_block_t *block); +static void xgi_mem_insert_node_head(xgi_mem_list_t *list, + xgi_mem_block_t *block); +static void xgi_mem_insert_node_tail(xgi_mem_list_t *list, + xgi_mem_block_t *block); +static void xgi_mem_delete_node(xgi_mem_list_t *list, + xgi_mem_block_t *block); +/* + * insert node:block after node:current + */ +static void xgi_mem_insert_node_after(xgi_mem_list_t *list, + xgi_mem_block_t *current, + xgi_mem_block_t *block) +{ + block->prev = current; + block->next = current->next; + current->next = block; + + if (current == list->tail) + { + list->tail = block; + } + else + { + block->next->prev = block; + } +} + +/* + * insert node:block before node:current + */ +static void xgi_mem_insert_node_before(xgi_mem_list_t *list, + xgi_mem_block_t *current, + xgi_mem_block_t *block) +{ + block->prev = current->prev; + block->next = current; + current->prev = block; + if (current == list->head) + { + list->head = block; + } + else + { + block->prev->next = block; + } +} +void xgi_mem_insert_node_head(xgi_mem_list_t *list, + xgi_mem_block_t *block) +{ + block->next = list->head; + block->prev = NULL; + + if (NULL == list->head) + { + list->tail = block; + } + else + { + 
list->head->prev = block; + } + list->head = block; +} + +static void xgi_mem_insert_node_tail(xgi_mem_list_t *list, + xgi_mem_block_t *block) + +{ + block->next = NULL; + block->prev = list->tail; + if (NULL == list->tail) + { + list->head = block; + } + else + { + list->tail->next = block; + } + list->tail = block; +} + +static void xgi_mem_delete_node(xgi_mem_list_t *list, + xgi_mem_block_t *block) +{ + if (block == list->head) + { + list->head = block->next; + } + if (block == list->tail) + { + list->tail = block->prev; + } + + if (block->prev) + { + block->prev->next = block->next; + } + if (block->next) + { + block->next->prev = block->prev; + } + + block->next = block->prev = NULL; +} +#endif +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long originalSize) +{ + struct list_head *free_list; + xgi_mem_block_t *block, *free_block, *used_block; + + unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; + + XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); + + if (size == 0) + { + XGI_ERROR("size == 0\n"); + return (NULL); + } + XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); + if (size > xgi_fb_heap->max_freesize) + { + XGI_ERROR("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", + size, xgi_fb_heap->max_freesize); + return (NULL); + } + + free_list = xgi_fb_heap->free_list.next; + + while (free_list != &xgi_fb_heap->free_list) + { + XGI_INFO("free_list: 0x%px \n", free_list); + block = list_entry(free_list, struct xgi_mem_block_s, list); + if (size <= block->size) + { + break; + } + free_list = free_list->next; + } + + if (free_list == &xgi_fb_heap->free_list) + { + XGI_ERROR("Can't allocate %ldk size from frame buffer memory !\n", size/1024); + return (NULL); + } + + free_block = block; + XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + size, free_block->offset, free_block->size); + + if (size == free_block->size) + { + used_block = 
free_block; + XGI_INFO("size == free_block->size: free_block = 0x%p\n", free_block); + list_del(&free_block->list); + } + else + { + used_block = xgi_mem_new_node(); + + if (used_block == NULL) return (NULL); + + if (used_block == free_block) + { + XGI_ERROR("used_block == free_block = 0x%p\n", used_block); + } + + used_block->offset = free_block->offset; + used_block->size = size; + + free_block->offset += size; + free_block->size -= size; + } + + xgi_fb_heap->max_freesize -= size; + + list_add(&used_block->list, &xgi_fb_heap->used_list); + + return (used_block); +} + +static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset) +{ + struct list_head *free_list, *used_list; + xgi_mem_block_t *used_block = NULL, *block = NULL; + xgi_mem_block_t *prev, *next; + + unsigned long upper; + unsigned long lower; + + used_list = xgi_fb_heap->used_list.next; + while (used_list != &xgi_fb_heap->used_list) + { + block = list_entry(used_list, struct xgi_mem_block_s, list); + if (block->offset == offset) + { + break; + } + used_list = used_list->next; + } + + if (used_list == &xgi_fb_heap->used_list) + { + XGI_ERROR("can't find block: 0x%lx to free!\n", offset); + return (NULL); + } + + used_block = block; + XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + + xgi_fb_heap->max_freesize += used_block->size; + + prev = next = NULL; + upper = used_block->offset + used_block->size; + lower = used_block->offset; + + free_list = xgi_fb_heap->free_list.next; + while (free_list != &xgi_fb_heap->free_list) + { + block = list_entry(free_list, struct xgi_mem_block_s, list); + + if (block->offset == upper) + { + next = block; + } + else if ((block->offset + block->size) == lower) + { + prev = block; + } + free_list = free_list->next; + } + + XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); + list_del(&used_block->list); + + if (prev && next) + { + prev->size += (used_block->size + next->size); + 
list_del(&next->list); + XGI_INFO("free node 0x%p\n", next); + kmem_cache_free(xgi_fb_cache_block, next); + kmem_cache_free(xgi_fb_cache_block, used_block); + + next = NULL; + used_block = NULL; + return (prev); + } + + if (prev) + { + prev->size += used_block->size; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_fb_cache_block, used_block); + used_block = NULL; + return (prev); + } + + if (next) + { + next->size += used_block->size; + next->offset = used_block->offset; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_fb_cache_block, used_block); + used_block = NULL; + return (next); + } + + list_add(&used_block->list, &xgi_fb_heap->free_list); + XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + + return (used_block); +} + diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h new file mode 100644 index 00000000..4b7ec2f2 --- /dev/null +++ b/linux-core/xgi_fb.h @@ -0,0 +1,71 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_FB_H_ +#define _XGI_FB_H_ + +typedef struct xgi_mem_block_s { + struct list_head list; + unsigned long offset; + unsigned long size; + atomic_t use_count; +} xgi_mem_block_t; + +typedef struct xgi_mem_heap_s { + struct list_head free_list; + struct list_head used_list; + struct list_head sort_list; + unsigned long max_freesize; + spinlock_t lock; +} xgi_mem_heap_t; + +#if 0 +typedef struct xgi_mem_block_s { + struct xgi_mem_block_s *next; + struct xgi_mem_block_s *prev; + unsigned long offset; + unsigned long size; + atomic_t use_count; +} xgi_mem_block_t; + +typedef struct xgi_mem_list_s { + xgi_mem_block_t *head; + xgi_mem_block_t *tail; +} xgi_mem_list_t; + +typedef struct xgi_mem_heap_s { + xgi_mem_list_t *free_list; + xgi_mem_list_t *used_list; + xgi_mem_list_t *sort_list; + unsigned long max_freesize; + spinlock_t lock; +} xgi_mem_heap_t; +#endif + +#endif + diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h new file mode 100644 index 00000000..f207a4f6 --- /dev/null +++ b/linux-core/xgi_linux.h @@ -0,0 +1,596 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + + +#ifndef _XGI_LINUX_H_ +#define _XGI_LINUX_H_ + +#include + +#ifndef LINUX_VERSION_CODE +#include +#endif + +#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */ +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) +# error "This driver does not support pre-2.4 kernels!" +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) +#define KERNEL_2_4 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) +# error "This driver does not support 2.5 kernels!" +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0) +#define KERNEL_2_6 +#else +# error "This driver does not support development kernels!" 
+#endif + +#if defined (CONFIG_SMP) && !defined (__SMP__) +#define __SMP__ +#endif + +#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) +#define MODVERSIONS +#endif + +#if defined (MODVERSIONS) && !defined (KERNEL_2_6) +#include +#endif + +#include /* printk */ +#include + +#include /* module_init, module_exit */ +#include /* pic_t, size_t, __u32, etc */ +#include /* error codes */ +#include /* circular linked list */ +#include /* NULL, offsetof */ +#include /* wait queues */ + +#include /* kmalloc, kfree, etc */ +#include /* vmalloc, vfree, etc */ + +#include /* poll_wait */ +#include /* mdelay, udelay */ +#include /* rdtsc rdtscl */ + +#include /* suser(), capable() replacement + for_each_task, for_each_process */ +#ifdef for_each_process +#define XGI_SCAN_PROCESS(p) for_each_process(p) +#else +#define XGI_SCAN_PROCESS(p) for_each_task(p) +#endif + +#ifdef KERNEL_2_6 +#include /* module_param() */ +#include /* kernel_locked */ +#include /* flush_tlb(), flush_tlb_all() */ +#include /* page table entry lookup */ +#endif + +#include /* pci_find_class, etc */ +#include /* tasklets, interrupt helpers */ +#include + +#include /* cli, sli, save_flags */ +#include /* ioremap, virt_to_phys */ +#include /* access_ok */ +#include /* PAGE_OFFSET */ +#include /* pte bit definitions */ + +#include +#include +#include + +#ifdef CONFIG_PROC_FS +#include +#endif + +#ifdef CONFIG_DEVFS_FS +#include +#endif + +#ifdef CONFIG_KMOD +#include +#endif + +#ifdef CONFIG_PM +#include +#endif + +#ifdef CONFIG_MTRR +#include +#endif + +#ifdef CONFIG_KDB +#include +#include +#endif + +#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) +#define AGPGART +#include +#include +#endif + +#ifndef MAX_ORDER +#ifdef KERNEL_2_4 +#define MAX_ORDER 10 +#endif +#ifdef KERNEL_2_6 +#define MAX_ORDER 11 +#endif +#endif + +#ifndef module_init +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#endif + +#ifndef minor +#define 
minor(x) MINOR(x) +#endif + +#ifndef IRQ_HANDLED +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif + +#if !defined (list_for_each) +#define list_for_each(pos, head) \ + for (pos = (head)->next, prefetch(pos->next); pos != (head); \ + pos = pos->next, prefetch(pos->next)) +#endif + +#ifdef KERNEL_2_4 +#define XGI_PCI_FOR_EACH_DEV(dev) pci_for_each_dev(dev) +#endif +#ifdef KERNEL_2_6 +extern struct list_head pci_devices; /* list of all devices */ +#define XGI_PCI_FOR_EACH_DEV(dev) \ + for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) +#endif + +/* + * the following macro causes problems when used in the same module + * as module_param(); undef it so we don't accidentally mix the two + */ +#if defined (KERNEL_2_6) +#undef MODULE_PARM +#endif + +#ifdef EXPORT_NO_SYMBOLS +EXPORT_NO_SYMBOLS; +#endif + +#if defined (KERNEL_2_4) +#define XGI_IS_SUSER() suser() +#define XGI_PCI_DEVICE_NAME(dev) ((dev)->name) +#define XGI_NUM_CPUS() smp_num_cpus +#define XGI_CLI() __cli() +#define XGI_SAVE_FLAGS(eflags) __save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags) __restore_flags(eflags) +#define XGI_MAY_SLEEP() (!in_interrupt()) +#define XGI_MODULE_PARAMETER(x) MODULE_PARM(x, "i") +#endif + +#if defined (KERNEL_2_6) +#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) +#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) +#define XGI_NUM_CPUS() num_online_cpus() +#define XGI_CLI() local_irq_disable() +#define XGI_SAVE_FLAGS(eflags) local_save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) +#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) +#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) +#endif + +/* Earlier 2.4.x kernels don't have pci_disable_device() */ +#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT +#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) +#else +#define XGI_PCI_DISABLE_DEVICE(dev) +#endif + +/* common defines */ 
+#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) +#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym) + +#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) +#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT) +#define XGI_VMA_PRIVATE(vma) ((vma)->vm_private_data) + +#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev) +#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255) + +#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start) +#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1) + +#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number +#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) + +#ifdef XGI_PCI_GET_CLASS_PRESENT +#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn) pci_get_slot(pci_find_bus(0,bus),devfn) +#define XGI_PCI_GET_CLASS(class,from) pci_get_class(class,from) +#else +#define XGI_PCI_DEV_PUT(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn) +#define XGI_PCI_GET_CLASS(class,from) pci_find_class(class,from) +#endif + +/* + * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver + * model is not sufficient for full acpi support. it may work in some cases, + * but not enough for us to officially support this configuration. 
+ */ +#if defined(CONFIG_ACPI) && defined(KERNEL_2_6) +#define XGI_PM_SUPPORT_ACPI +#endif + +#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) +#define XGI_PM_SUPPORT_APM +#endif + + +#if defined(CONFIG_DEVFS_FS) +#if defined(KERNEL_2_6) +typedef void* devfs_handle_t; +#define XGI_DEVFS_REGISTER(_name, _minor) \ + ({ \ + devfs_handle_t __handle = NULL; \ + if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \ + S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \ + { \ + __handle = (void *) 1; /* XXX Fix me! (boolean) */ \ + } \ + __handle; \ + }) +/* +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i) +*/ +#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") +#else // defined(KERNEL_2_4) +#define XGI_DEVFS_REGISTER(_name, _minor) \ + ({ \ + devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \ + XGI_DEV_MAJOR, _minor, \ + S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \ + __handle; \ + }) + +#define XGI_DEVFS_REMOVE_DEVICE(i) \ + ({ \ + if (xgi_devfs_handles[i] != NULL) \ + { \ + devfs_unregister(xgi_devfs_handles[i]); \ + } \ + }) +#define XGI_DEVFS_REMOVE_CONTROL() \ + ({ \ + if (xgi_devfs_handles[0] != NULL) \ + { \ + devfs_unregister(xgi_devfs_handles[0]); \ + } \ + }) +#endif /* defined(KERNEL_2_4) */ +#endif /* defined(CONFIG_DEVFS_FS) */ + +#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6) +#define XGI_REGISTER_CHRDEV(x...) devfs_register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...) devfs_unregister_chrdev(x) +#else +#define XGI_REGISTER_CHRDEV(x...) register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) +#endif + +#if defined(XGI_REMAP_PFN_RANGE_PRESENT) +#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ + remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x) +#elif defined(XGI_REMAP_PAGE_RANGE_5) +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) +#elif defined(XGI_REMAP_PAGE_RANGE_4) +#define XGI_REMAP_PAGE_RANGE(x...) 
remap_page_range(x) +#else +#warning "xgi_configure.sh failed, assuming remap_page_range(5)!" +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) +#endif + +#if defined(pmd_offset_map) +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ + { \ + pg_mid_dir = pmd_offset_map(pg_dir, address); \ + } +#define XGI_PMD_UNMAP(pg_mid_dir) \ + { \ + pmd_unmap(pg_mid_dir); \ + } +#else +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ + { \ + pg_mid_dir = pmd_offset(pg_dir, address); \ + } +#define XGI_PMD_UNMAP(pg_mid_dir) +#endif + +#define XGI_PMD_PRESENT(pg_mid_dir) \ + ({ \ + if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \ + { \ + XGI_PMD_UNMAP(pg_mid_dir); \ + pg_mid_dir = NULL; \ + } \ + pg_mid_dir != NULL; \ + }) + +#if defined(pte_offset_atomic) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset_atomic(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) \ + { \ + pte_kunmap(pte); \ + } +#elif defined(pte_offset) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) +#else +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset_map(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) \ + { \ + pte_unmap(pte); \ + } +#endif + +#define XGI_PTE_PRESENT(pte) \ + ({ \ + if (pte) \ + { \ + if (!pte_present(*pte)) \ + { \ + XGI_PTE_UNMAP(pte); pte = NULL; \ + } \ + } \ + pte != NULL; \ + }) + +#define XGI_PTE_VALUE(pte) \ + ({ \ + unsigned long __pte_value = pte_val(*pte); \ + XGI_PTE_UNMAP(pte); \ + __pte_value; \ + }) + +#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE) +#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1)) + +#if !defined (pgprot_noncached) +static inline pgprot_t pgprot_noncached(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + new_prot = 
__pgprot(pgprot_val(old_prot) | _PAGE_PCD); + return new_prot; + } +#endif + +#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined) +/* Added define for write combining page, only valid if pat enabled. */ +#define _PAGE_WRTCOMB _PAGE_PWT +#define __PAGE_KERNEL_WRTCOMB \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED) +#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB) + +static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) + { + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + { + pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT); + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB); + } + return new_prot; + } +#endif + +#if !defined(page_to_pfn) +#define page_to_pfn(page) ((page) - mem_map) +#endif + +#define XGI_VMALLOC(ptr, size) \ + { \ + (ptr) = vmalloc_32(size); \ + } + +#define XGI_VFREE(ptr, size) \ + { \ + vfree((void *) (ptr)); \ + } + +#define XGI_IOREMAP(ptr, physaddr, size) \ + { \ + (ptr) = ioremap(physaddr, size); \ + } + +#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \ + { \ + (ptr) = ioremap_nocache(physaddr, size); \ + } + +#define XGI_IOUNMAP(ptr, size) \ + { \ + iounmap(ptr); \ + } + +/* + * only use this because GFP_KERNEL may sleep.. + * GFP_ATOMIC is ok, it won't sleep + */ +#define XGI_KMALLOC(ptr, size) \ + { \ + (ptr) = kmalloc(size, GFP_KERNEL); \ + } + +#define XGI_KMALLOC_ATOMIC(ptr, size) \ + { \ + (ptr) = kmalloc(size, GFP_ATOMIC); \ + } + +#define XGI_KFREE(ptr, size) \ + { \ + kfree((void *) (ptr)); \ + } + +#define XGI_GET_FREE_PAGES(ptr, order) \ + { \ + (ptr) = __get_free_pages(GFP_KERNEL, order); \ + } + +#define XGI_FREE_PAGES(ptr, order) \ + { \ + free_pages(ptr, order); \ + } + +typedef struct xgi_pte_s { + unsigned long phys_addr; + unsigned long virt_addr; +} xgi_pte_t; + +/* + * AMD Athlon processors expose a subtle bug in the Linux + * kernel, that may lead to AGP memory corruption. 
Recent + * kernel versions had a workaround for this problem, but + * 2.4.20 is the first kernel to address it properly. The + * page_attr API provides the means to solve the problem. + */ +#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT) +static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t *page_ptr) + { + struct page *page = virt_to_page(__va(page_ptr->phys_addr)); + change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); + } +static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t *page_ptr) + { + struct page *page = virt_to_page(__va(page_ptr->phys_addr)); + change_page_attr(page, 1, PAGE_KERNEL); + } +#else +#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list) +#define XGI_SET_PAGE_ATTRIB_CACHED(page_list) +#endif + +#ifdef KERNEL_2_4 +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) + +#define XGILockPage(page) set_bit(PG_locked, &(page)->flags) +#define XGIUnlockPage(page) clear_bit(PG_locked, &(page)->flags) +#endif + +#ifdef KERNEL_2_6 +/* add for SUSE 9, Jill*/ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) +#else +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v) +#endif +#define XGILockPage(page) SetPageLocked(page) +#define XGIUnlockPage(page) ClearPageLocked(page) +#endif + + +/* + * hide a pointer to struct xgi_info_t in a file-private info + */ + +typedef struct +{ + void *info; + U32 num_events; + spinlock_t fp_lock; + 
wait_queue_head_t wait_queue; +} xgi_file_private_t; + +#define FILE_PRIVATE(filp) ((filp)->private_data) + +#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp)) + +/* for the card devices */ +#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) + +#ifdef KERNEL_2_0 +#define INODE_FROM_FP(filp) ((filp)->f_inode) +#else +#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) +#endif + +#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) +#define XGI_ATOMIC_INC(data) atomic_inc(&(data)) +#define XGI_ATOMIC_DEC(data) atomic_dec(&(data)) +#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) +#define XGI_ATOMIC_READ(data) atomic_read(&(data)) + +/* + * lock-related functions that should only be called from this file + */ +#define xgi_init_lock(lock) spin_lock_init(&lock) +#define xgi_lock(lock) spin_lock(&lock) +#define xgi_unlock(lock) spin_unlock(&lock) +#define xgi_down(lock) down(&lock) +#define xgi_up(lock) up(&lock) + +#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags) +#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags) + +#endif diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c new file mode 100644 index 00000000..b15c7ecf --- /dev/null +++ b/linux-core/xgi_misc.c @@ -0,0 +1,657 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" + +void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t *req) +{ + req->device_id = info->device_id; + req->device_name[0] = 'x'; + req->device_name[1] = 'g'; + req->device_name[2] = '4'; + req->device_name[3] = '7'; + req->vendor_id = info->vendor_id; + req->curr_display_mode = 0; + req->fb_size = info->fb.size; + req->sarea_bus_addr = info->sarea_info.bus_addr; + req->sarea_size = info->sarea_info.size; +} + +void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req) +{ + req->mmioBase = (void *)info->mmio.base; + req->size = info->mmio.size; +} + +void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req) +{ + info->scrn_info.scrn_start = req->scrn_start; + info->scrn_info.scrn_xres = req->scrn_xres; + info->scrn_info.scrn_yres = req->scrn_yres; + info->scrn_info.scrn_bpp = req->scrn_bpp; + info->scrn_info.scrn_pitch = req->scrn_pitch; + + XGI_INFO("info->scrn_info.scrn_start: 0x%lx" + "info->scrn_info.scrn_xres: 0x%lx" + "info->scrn_info.scrn_yres: 0x%lx" + "info->scrn_info.scrn_bpp: 0x%lx" + "info->scrn_info.scrn_pitch: 0x%lx\n", + info->scrn_info.scrn_start, + info->scrn_info.scrn_xres, + info->scrn_info.scrn_yres, + info->scrn_info.scrn_bpp, + info->scrn_info.scrn_pitch); +} + +void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req) +{ + req->scrn_start = info->scrn_info.scrn_start; + req->scrn_xres = info->scrn_info.scrn_xres; + req->scrn_yres = info->scrn_info.scrn_yres; + req->scrn_bpp = info->scrn_info.scrn_bpp; + req->scrn_pitch = info->scrn_info.scrn_pitch; + + XGI_INFO("req->scrn_start: 0x%lx" + "req->scrn_xres: 0x%lx" + "req->scrn_yres: 0x%lx" + "req->scrn_bpp: 0x%lx" + "req->scrn_pitch: 0x%lx\n", + req->scrn_start, + req->scrn_xres, + req->scrn_yres, + req->scrn_bpp, + req->scrn_pitch); +} + +void xgi_ge_reset(xgi_info_t *info) 
+{ + xgi_disable_ge(info); + xgi_enable_ge(info); +} + +void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req) +{ + info->sarea_info.bus_addr = req->bus_addr; + info->sarea_info.size = req->size; + XGI_INFO("info->sarea_info.bus_addr: 0x%lx" + "info->sarea_info.size: 0x%lx\n", + info->sarea_info.bus_addr, + info->sarea_info.size); +} + +/* + * irq functions + */ +#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff + +static U32 s_invalid_begin = 0; + +BOOL xgi_ge_irq_handler(xgi_info_t *info) +{ + volatile U8 *mmio_vbase = info->mmio.vbase; + volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); + U32 int_status = ge_3d_status[4]; // interrupt status + U32 auto_reset_count = 0; + BOOL is_support_auto_reset = FALSE; + + // Check GE on/off + if (0 == (0xffffc0f0 & int_status)) + { + U32 old_ge_status = ge_3d_status[0x00]; + U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; + if (0 != (0x1000 & int_status)) + { + // We got GE stall interrupt. + ge_3d_status[0x04] = int_status | 0x04000000; + + if (TRUE == is_support_auto_reset) + { + BOOL is_wrong_signal = FALSE; + static U32 last_int_tick_low, last_int_tick_high; + static U32 new_int_tick_low, new_int_tick_high; + static U32 continoue_int_count = 0; + // OE II is busy. 
+ while (old_ge_status & 0x001c0000) + { + U16 check; + // Check Read back status + *(mmio_vbase + 0x235c) = 0x80; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if ((check & 0x3f) != ((check & 0x3f00) >> 8)) + { + is_wrong_signal = TRUE; + break; + } + // Check RO channel + *(mmio_vbase + 0x235c) = 0x83; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if ((check & 0x0f) != ((check & 0xf0) >> 4)) + { + is_wrong_signal = TRUE; + break; + } + // Check RW channel + *(mmio_vbase + 0x235c) = 0x88; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if ((check & 0x0f) != ((check & 0xf0) >> 4)) + { + is_wrong_signal = TRUE; + break; + } + // Check RO channel outstanding + *(mmio_vbase + 0x235c) = 0x8f; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if (0 != (check & 0x3ff)) + { + is_wrong_signal = TRUE; + break; + } + // Check RW channel outstanding + *(mmio_vbase + 0x235c) = 0x90; + check = *((volatile U16*)(mmio_vbase + 0x2360)); + if (0 != (check & 0x3ff)) + { + is_wrong_signal = TRUE; + break; + } + // No pending PCIE request. GE stall. + break; + } + + if (is_wrong_signal) + { + // Nothing but skip. + } + else if (0 == continoue_int_count++) + { + rdtsc(last_int_tick_low, last_int_tick_high); + } + else + { + rdtscl(new_int_tick_low); + if ((new_int_tick_low - last_int_tick_low) > STALL_INTERRUPT_RESET_THRESHOLD) + { + continoue_int_count = 0; + } + else if (continoue_int_count >= 3) + { + continoue_int_count = 0; + + // GE Hung up, need reset. + XGI_INFO("Reset GE!\n"); + + *(mmio_vbase + 0xb057) = 8; + int time_out = 0xffff; + while (0 != (ge_3d_status[0x00] & 0xf0000000)) + { + while (0 != ((--time_out) & 0xfff)); + if (0 == time_out) + { + XGI_INFO("Can not reset back 0x%lx!\n", ge_3d_status[0x00]); + *(mmio_vbase + 0xb057) = 0; + // Have to use 3x5.36 to reset. 
+ // Save and close dynamic gating + U8 old_3ce = *(mmio_vbase + 0x3ce); + *(mmio_vbase + 0x3ce) = 0x2a; + U8 old_3cf = *(mmio_vbase + 0x3cf); + *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; + // Reset GE + U8 old_index = *(mmio_vbase + 0x3d4); + *(mmio_vbase + 0x3d4) = 0x36; + U8 old_36 = *(mmio_vbase + 0x3d5); + *(mmio_vbase + 0x3d5) = old_36 | 0x10; + while (0 != ((--time_out) & 0xfff)); + *(mmio_vbase + 0x3d5) = old_36; + *(mmio_vbase + 0x3d4) = old_index; + // Restore dynamic gating + *(mmio_vbase + 0x3cf) = old_3cf; + *(mmio_vbase + 0x3ce) = old_3ce; + break; + } + } + *(mmio_vbase + 0xb057) = 0; + + // Increase Reset counter + auto_reset_count++; + } + } + } + return TRUE; + } + else if (0 != (0x1 & int_status)) + { + s_invalid_begin++; + ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; + return TRUE; + } + } + return FALSE; +} + +BOOL xgi_crt_irq_handler(xgi_info_t *info) +{ + BOOL ret = FALSE; + U8 *mmio_vbase = info->mmio.vbase; + U32 device_status = 0; + U32 hw_status = 0; + U8 save_3ce = bReadReg(0x3ce); + + + if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened + { + U8 op3cf_3d; + U8 op3cf_37; + + // What happened? + op3cf_37 = bIn3cf(0x37); + +#if 0 + if (op3cf_37 & 0x04) + device_status |= GDEVST_CONNECT; + else + device_status &= ~GDEVST_CONNECT; + + device_status |= GDEVST_DEVICE_CHANGED; + hw_status |= HWST_DEVICE_CHANGED; +#endif + // Clear CRT interrupt + op3cf_3d = bIn3cf(0x3d); + bOut3cf(0x3d, (op3cf_3d | 0x04)); + bOut3cf(0x3d, (op3cf_3d & ~0x04)); + ret = TRUE; + } + bWriteReg(0x3ce, save_3ce); + + return (ret); +} + +BOOL xgi_dvi_irq_handler(xgi_info_t *info) +{ + BOOL ret = FALSE; + U8 *mmio_vbase = info->mmio.vbase; + U32 device_status = 0; + U32 hw_status = 0; + U8 save_3ce = bReadReg(0x3ce); + + if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened + { + U8 op3cf_39; + U8 op3cf_37; + U8 op3x5_5a; + U8 save_3x4 = bReadReg(0x3d4);; + + // What happened? 
+ op3cf_37 = bIn3cf(0x37); +#if 0 + //Also update our internal flag + if (op3cf_37 & 0x10) // Second Monitor plugged In + { + device_status |= GDEVST_CONNECT; + //Because currenly we cannot determine if DVI digital + //or DVI analog is connected according to DVI interrupt + //We should still call BIOS to check it when utility ask us + device_status &= ~GDEVST_CHECKED; + } + else + { + device_status &= ~GDEVST_CONNECT; + } +#endif + //Notify BIOS that DVI plug/unplug happened + op3x5_5a = bIn3x5(0x5a); + bOut3x5(0x5a, op3x5_5a & 0xf7); + + bWriteReg(0x3d4, save_3x4); + + //device_status |= GDEVST_DEVICE_CHANGED; + //hw_status |= HWST_DEVICE_CHANGED; + + // Clear DVI interrupt + op3cf_39 = bIn3cf(0x39); + bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 + bOut3c5(0x39, (op3cf_39 | 0x01 )); //Set 3cf.39 bit 0 to 1 + + ret = TRUE; + } + bWriteReg(0x3ce, save_3ce); + + return (ret); +} + +void xgi_dump_register(xgi_info_t *info) +{ + int i, j; + unsigned char temp; + + // 0x3C5 + printk("\r\n=====xgi_dump_register========0x%x===============\r\n", 0x3C5); + + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bIn3c5(i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + // 0x3D5 + printk("\r\n====xgi_dump_register=========0x%x===============\r\n", 0x3D5); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bIn3x5(i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + // 0x3CF + printk("\r\n=========xgi_dump_register====0x%x===============\r\n", 0x3CF); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x 
", i); + + for(j=0; j<0x10; j++) + { + temp = bIn3cf(i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n=====xgi_dump_register======0x%x===============\r\n", 0xB000); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x5; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0xB000 + i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2200); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0xB; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0x2200 + i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2300); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x7; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0x2300 + i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2400); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0x2400 + i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2800); + for(i=0; i<0x10; i++) + { + if(i == 0) + { + printk("%5x", i); + } + else + { + printk("%3x", i); + } + } + printk("\r\n"); + + for(i=0; i<0x10; i++) + { + printk("%1x ", i); + + for(j=0; j<0x10; j++) + { + temp = bReadReg(0x2800 + i*0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } +} + +void xgi_restore_registers(xgi_info_t *info) 
+{ + bOut3x5(0x13, 0); + bOut3x5(0x8b, 2); +} + +void xgi_waitfor_pci_idle(xgi_info_t *info) +{ +#define WHOLD_GE_STATUS 0x2800 +#define IDLE_MASK ~0x90200000 + + int idleCount = 0; + while(idleCount < 5) + { + if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) + { + idleCount = 0; + } + else + { + idleCount ++; + } + } +} + +int xgi_get_cpu_id(struct cpu_info_s *arg) +{ + int op = arg->_eax; + __asm__("cpuid" + : "=a" (arg->_eax), + "=b" (arg->_ebx), + "=c" (arg->_ecx), + "=d" (arg->_edx) + : "0" (op)); + + XGI_INFO("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n", + op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx); +} + +/*memory collect function*/ +extern struct list_head xgi_mempid_list; +void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt) +{ + xgi_mem_pid_t *mempid_block; + struct list_head *mempid_list; + struct task_struct *p,*find; + unsigned int cnt = 0; + + mempid_list = xgi_mempid_list.next; + + while (mempid_list != &xgi_mempid_list) + { + mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); + mempid_list = mempid_list->next; + + find = NULL; + XGI_SCAN_PROCESS(p) + { + if (p->pid == mempid_block->pid) + { + XGI_INFO("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", mempid_block->pid, p->state, mempid_block->location, mempid_block->bus_addr); + find = p; + if (mempid_block->bus_addr == 0xFFFFFFFF) + ++cnt; + break; + } + } + if (!find) + { + if (mempid_block->location == LOCAL) + { + XGI_INFO("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr); + xgi_fb_free(info, mempid_block->bus_addr); + } + else if (mempid_block->bus_addr != 0xFFFFFFFF) + { + XGI_INFO("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! 
\n", mempid_block->pid, mempid_block->bus_addr); + xgi_pcie_free(info, mempid_block->bus_addr); + } + else + { + /*only delete the memory block*/ + list_del(&mempid_block->list); + XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_block->pid); + kfree(mempid_block); + } + } + } + *pcnt = cnt; +} diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h new file mode 100644 index 00000000..ac4daaa1 --- /dev/null +++ b/linux-core/xgi_misc.h @@ -0,0 +1,49 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + + +#ifndef _XGI_MISC_H_ +#define _XGI_MISC_H_ + +extern void xgi_dump_register(xgi_info_t *info); +extern void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t * req); +extern void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req); +extern void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req); +extern void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req); +extern void xgi_ge_reset(xgi_info_t *info); +extern void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req); +extern int xgi_get_cpu_id(struct cpu_info_s *arg); + +extern void xgi_restore_registers(xgi_info_t *info); +extern BOOL xgi_ge_irq_handler(xgi_info_t *info); +extern BOOL xgi_crt_irq_handler(xgi_info_t *info); +extern BOOL xgi_dvi_irq_handler(xgi_info_t *info); +extern void xgi_waitfor_pci_idle(xgi_info_t *info); + + +#endif diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c new file mode 100644 index 00000000..62e2323f --- /dev/null +++ b/linux-core/xgi_pcie.c @@ -0,0 +1,1060 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" + +static xgi_pcie_heap_t *xgi_pcie_heap = NULL; +static kmem_cache_t *xgi_pcie_cache_block = NULL; +static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL; +static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL; +static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL; +extern struct list_head xgi_mempid_list; + +static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) +{ + struct page *page; + unsigned long page_addr = 0; + unsigned long page_count = 0; + int i; + + page_count = (1 << page_order); + page_addr = __get_free_pages(GFP_KERNEL, page_order); + + if (page_addr == 0UL) + { + XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n", + page_count); + return 0; + } + + page = virt_to_page(page_addr); + + for (i = 0; i < page_count; i++, page++) + { + XGI_INC_PAGE_COUNT(page); + XGILockPage(page); + } + + XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n", + page_count, page_order, page_addr); + return page_addr; +} + +static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) +{ + struct page *page; + unsigned long page_count = 0; + int i; + + page_count = (1 << page_order); + page = virt_to_page(page_addr); + + for (i = 0; i < page_count; i++, page++) + { + XGI_DEC_PAGE_COUNT(page); + 
XGIUnlockPage(page); + } + + free_pages(page_addr, page_order); +} + +static int xgi_pcie_lut_init(xgi_info_t *info) +{ + unsigned char *page_addr = NULL; + unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; + unsigned long count = 0; + u8 temp = 0; + + /* Jong 06/06/2006 */ + unsigned long pcie_aperture_size; + + info->pcie.size = 128 * 1024 * 1024; + + /* Get current FB aperture size */ + temp = In3x5(0x27); + XGI_INFO("In3x5(0x27): 0x%x \n", temp); + + if (temp & 0x01) /* 256MB; Jong 06/05/2006; 0x10000000 */ + { + /* Jong 06/06/2006; allocate memory */ + pcie_aperture_size=256 * 1024 * 1024; + /* info->pcie.base = 256 * 1024 * 1024; */ /* pcie base is different from fb base */ + } + else /* 128MB; Jong 06/05/2006; 0x08000000 */ + { + /* Jong 06/06/2006; allocate memory */ + pcie_aperture_size=128 * 1024 * 1024; + /* info->pcie.base = 128 * 1024 * 1024; */ + } + + /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */ + /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */ + /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */ + /* info->pcie.base=ioremap(0x0F000000, 0x10000000); */ /* Cause system hang */ + info->pcie.base=pcie_aperture_size; /* works */ + /* info->pcie.base=info->fb.base + info->fb.size; */ /* System hang */ + /* info->pcie.base=128 * 1024 * 1024;*/ /* System hang */ + + XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base); + + + /* Get current lookup table page size */ + temp = bReadReg(0xB00C); + if (temp & 0x04) /* 8KB */ + { + info->lutPageSize = 8 * 1024; + } + else /* 4KB */ + { + info->lutPageSize = 4 * 1024; + } + + XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + +#if 0 + /* Get current lookup table location */ + temp = bReadReg(0xB00C); + if (temp & 0x02) /* LFB */ + { + info->isLUTInLFB = TRUE; + /* Current we only support lookup table in LFB */ + temp &= 0xFD; + bWriteReg(0xB00C, temp); + info->isLUTInLFB = FALSE; + } + 
else /* SFB */ + { + info->isLUTInLFB = FALSE; + } + + XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + + /* Get current SDFB page size */ + temp = bReadReg(0xB00C); + if (temp & 0x08) /* 8MB */ + { + info->sdfbPageSize = 8 * 1024 * 1024; + } + else /* 4MB */ + { + info->sdfbPageSize = 4 * 1024 * 1024; + } +#endif + pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE; + + /* + * Allocate memory for PCIE GART table; + */ + lutEntryNum = pciePageCount; + lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE; + + /* get page_order base on page_count */ + count = lutPageCount; + for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder); + + if ((lutPageCount << 1) == (1 << lutPageOrder)) + { + lutPageOrder -= 1; + } + + XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n", + lutEntryNum, lutPageCount, lutPageOrder); + + info->lutPageOrder = lutPageOrder; + page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder); + + if (!page_addr) + { + XGI_ERROR("cannot allocate PCIE lut page!\n"); + goto fail; + } + info->lut_base = (unsigned long *)page_addr; + + XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n", + page_addr, virt_to_phys(page_addr)); + + XGI_INFO("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n", + info->lut_base, __pa(info->lut_base), info->lutPageOrder); + + /* + * clean all PCIE GART Entry + */ + memset(page_addr, 0, PAGE_SIZE << lutPageOrder); + +#if defined(__i386__) || defined(__x86_64__) + asm volatile ( "wbinvd" ::: "memory" ); +#else + mb(); +#endif + + /* Set GART in SFB */ + bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02); + /* Set GART base address to HW */ + dwWriteReg(0xB034, __pa(info->lut_base)); + + return 1; +fail: + return 0; +} + +static void xgi_pcie_lut_cleanup(xgi_info_t *info) +{ + if (info->lut_base) + { + XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", + info->lut_base, info->lutPageOrder); + xgi_pcie_lut_free((unsigned 
long)info->lut_base, info->lutPageOrder); + info->lut_base = NULL; + } +} + +static xgi_pcie_block_t *xgi_pcie_new_node(void) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *)kmem_cache_alloc(xgi_pcie_cache_block, GFP_KERNEL); + if (block == NULL) + { + return NULL; + } + + block->offset = 0; /* block's offset in pcie memory, begin from 0 */ + block->size = 0; /* The block size. */ + block->bus_addr = 0; /* CPU access address/bus address */ + block->hw_addr = 0; /* GE access address */ + block->page_count = 0; + block->page_order = 0; + block->page_block = NULL; + block->page_table = NULL; + block->owner = PCIE_INVALID; + + return block; +} + +static void xgi_pcie_block_stuff_free(xgi_pcie_block_t *block) +{ + struct page *page; + xgi_page_block_t *page_block = block->page_block; + xgi_page_block_t *free_block; + unsigned long page_count = 0; + int i; + + //XGI_INFO("block->page_block: 0x%p \n", block->page_block); + while (page_block) + { + page_count = page_block->page_count; + + page = virt_to_page(page_block->virt_addr); + for (i = 0; i < page_count; i++, page++) + { + XGI_DEC_PAGE_COUNT(page); + XGIUnlockPage(page); + } + free_pages(page_block->virt_addr, page_block->page_order); + + page_block->phys_addr = 0; + page_block->virt_addr = 0; + page_block->page_count = 0; + page_block->page_order = 0; + + free_block = page_block; + page_block = page_block->next; + //XGI_INFO("free free_block: 0x%p \n", free_block); + kfree(free_block); + free_block = NULL; + } + + if (block->page_table) + { + //XGI_INFO("free block->page_table: 0x%p \n", block->page_table); + kfree(block->page_table); + block->page_table = NULL; + } +} + +int xgi_pcie_heap_init(xgi_info_t *info) +{ + xgi_pcie_block_t *block; + + if (!xgi_pcie_lut_init(info)) + { + XGI_ERROR("xgi_pcie_lut_init failed\n"); + return 0; + } + + xgi_pcie_heap = (xgi_pcie_heap_t *)kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL); + if(!xgi_pcie_heap) + { + XGI_ERROR("xgi_pcie_heap alloc failed\n"); + goto fail1; + } + 
INIT_LIST_HEAD(&xgi_pcie_heap->free_list); + INIT_LIST_HEAD(&xgi_pcie_heap->used_list); + INIT_LIST_HEAD(&xgi_pcie_heap->sort_list); + + xgi_pcie_heap->max_freesize = info->pcie.size; + + xgi_pcie_cache_block = kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), + 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + + if (NULL == xgi_pcie_cache_block) + { + XGI_ERROR("Fail to creat xgi_pcie_block\n"); + goto fail2; + } + + block = (xgi_pcie_block_t *)xgi_pcie_new_node(); + if (!block) + { + XGI_ERROR("xgi_pcie_new_node failed\n"); + goto fail3; + } + + block->offset = 0; /* block's offset in pcie memory, begin from 0 */ + block->size = info->pcie.size; + + list_add(&block->list, &xgi_pcie_heap->free_list); + + XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", block->offset, block->size); + return 1; +fail3: + if (xgi_pcie_cache_block) + { + kmem_cache_destroy(xgi_pcie_cache_block); + xgi_pcie_cache_block = NULL; + } + +fail2: + if(xgi_pcie_heap) + { + kfree(xgi_pcie_heap); + xgi_pcie_heap = NULL; + } +fail1: + xgi_pcie_lut_cleanup(info); + return 0; +} + +void xgi_pcie_heap_check(void) +{ + struct list_head *useList, *temp; + xgi_pcie_block_t *block; + unsigned int ownerIndex; + char *ownerStr[6] = {"2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE"}; + + if (xgi_pcie_heap) + { + useList = &xgi_pcie_heap->used_list; + temp = useList->next; + XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); + while (temp != useList) + { + block = list_entry(temp, struct xgi_pcie_block_s, list); + if (block->owner == PCIE_2D) + ownerIndex = 0; + else if (block->owner > PCIE_3D_TEXTURE || block->owner < PCIE_2D || block->owner < PCIE_3D) + ownerIndex = 5; + else + ownerIndex = block->owner - PCIE_3D + 1; + XGI_INFO("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n", + ownerStr[ownerIndex], block->offset, block->size); + temp = temp->next; + } + + } +} + + +void xgi_pcie_heap_cleanup(xgi_info_t *info) +{ + struct list_head *free_list, *temp; + 
xgi_pcie_block_t *block; + int j; + + xgi_pcie_lut_cleanup(info); + XGI_INFO("xgi_pcie_lut_cleanup scceeded\n"); + + if (xgi_pcie_heap) + { + free_list = &xgi_pcie_heap->free_list; + for (j = 0; j < 3; j++, free_list++) + { + temp = free_list->next; + + while (temp != free_list) + { + block = list_entry(temp, struct xgi_pcie_block_s, list); + XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + j, block->offset, block->size); + xgi_pcie_block_stuff_free(block); + block->bus_addr = 0; + block->hw_addr = 0; + + temp = temp->next; + //XGI_INFO("No. %d free block: 0x%p \n", j, block); + kmem_cache_free(xgi_pcie_cache_block, block); + block = NULL; + } + } + + XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap); + kfree(xgi_pcie_heap); + xgi_pcie_heap = NULL; + } + + if (xgi_pcie_cache_block) + { + kmem_cache_destroy(xgi_pcie_cache_block); + xgi_pcie_cache_block = NULL; + } +} + + +static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t *info, + unsigned long originalSize, + enum PcieOwner owner) +{ + struct list_head *free_list; + xgi_pcie_block_t *block, *used_block, *free_block; + xgi_page_block_t *page_block, *prev_page_block; + struct page *page; + unsigned long page_order = 0, count = 0, index =0; + unsigned long page_addr = 0; + unsigned long *lut_addr = NULL; + unsigned long lut_id = 0; + unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; + int i, j, page_count = 0; + int temp = 0; + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n"); + XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); + + if (owner == PCIE_3D) + { + if (xgi_pcie_vertex_block) + { + XGI_INFO("PCIE Vertex has been created, return directly.\n"); + return xgi_pcie_vertex_block; + } + } + + if (owner == PCIE_3D_CMDLIST) + { + if (xgi_pcie_cmdlist_block) + { + XGI_INFO("PCIE Cmdlist has been created, return directly.\n"); + return xgi_pcie_cmdlist_block; + } + } + + if (owner == PCIE_3D_SCRATCHPAD) + { + if 
(xgi_pcie_scratchpad_block) + { + XGI_INFO("PCIE Scratchpad has been created, return directly.\n"); + return xgi_pcie_scratchpad_block; + } + } + + if (size == 0) + { + XGI_ERROR("size == 0 \n"); + return (NULL); + } + + XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize); + if (size > xgi_pcie_heap->max_freesize) + { + XGI_ERROR("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n", + size, xgi_pcie_heap->max_freesize); + return (NULL); + } + + /* Jong 05/30/2006; find next free list which has enough space*/ + free_list = xgi_pcie_heap->free_list.next; + while (free_list != &xgi_pcie_heap->free_list) + { + //XGI_INFO("free_list: 0x%px \n", free_list); + block = list_entry(free_list, struct xgi_pcie_block_s, list); + if (size <= block->size) + { + break; + } + free_list = free_list->next; + } + + if (free_list == &xgi_pcie_heap->free_list) + { + XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", size/1024); + return (NULL); + } + + free_block = block; + XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + size, free_block->offset, free_block->size); + + if (size == free_block->size) + { + used_block = free_block; + XGI_INFO("size==free_block->size: free_block = 0x%p\n", free_block); + list_del(&free_block->list); + } + else + { + used_block = xgi_pcie_new_node(); + if (used_block == NULL) + { + return NULL; + } + + if (used_block == free_block) + { + XGI_ERROR("used_block == free_block = 0x%p\n", used_block); + } + + used_block->offset = free_block->offset; + used_block->size = size; + + free_block->offset += size; + free_block->size -= size; + } + + xgi_pcie_heap->max_freesize -= size; + + used_block->bus_addr = info->pcie.base + used_block->offset; + used_block->hw_addr = info->pcie.base + used_block->offset; + used_block->page_count = page_count = size / PAGE_SIZE; + + /* get page_order base on page_count */ + for (used_block->page_order = 0; page_count; page_count >>= 1) + { + ++used_block->page_order; + } + + if 
((used_block->page_count << 1) == (1 << used_block->page_order)) + { + used_block->page_order--; + } + XGI_INFO("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n", + used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr, used_block->page_count, used_block->page_order); + + used_block->page_block = NULL; + //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL); + //if (!used_block->page_block) return NULL; + //used_block->page_block->next = NULL; + + used_block->page_table = (xgi_pte_t *)kmalloc(sizeof(xgi_pte_t) * used_block->page_count, GFP_KERNEL); + if (used_block->page_table == NULL) + { + goto fail; + } + + lut_id = (used_block->offset >> PAGE_SHIFT); + lut_addr = info->lut_base; + lut_addr += lut_id; + XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id); + + /* alloc free pages from system */ + page_count = used_block->page_count; + page_block = used_block->page_block; + prev_page_block = used_block->page_block; + for (i = 0; page_count > 0; i++) + { + /* if size is bigger than 2M bytes, it should be split */ + if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) + { + page_order = XGI_PCIE_ALLOC_MAX_ORDER; + } + else + { + count = page_count; + for (page_order = 0; count; count >>= 1, ++page_order); + + if ((page_count << 1) == (1 << page_order)) + { + page_order -= 1; + } + } + + count = (1 << page_order); + page_addr = __get_free_pages(GFP_KERNEL, page_order); + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", page_addr); + + if (!page_addr) + { + XGI_ERROR("No: %d :Can't get free pages: 0x%lx from system memory !\n", + i, count); + goto fail; + } + + /* Jong 05/30/2006; test */ + memset((unsigned char *)page_addr, 0xFF, PAGE_SIZE << page_order); + /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */ + + if (page_block == NULL) + { + 
page_block = (xgi_page_block_t *)kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL); + if (!page_block) + { + XGI_ERROR("Can't get memory for page_block! \n"); + goto fail; + } + } + + if (prev_page_block == NULL) + { + used_block->page_block = page_block; + prev_page_block = page_block; + } + else + { + prev_page_block->next = page_block; + prev_page_block = page_block; + } + + page_block->next = NULL; + page_block->phys_addr = __pa(page_addr); + page_block->virt_addr = page_addr; + page_block->page_count = count; + page_block->page_order = page_order; + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", page_block->phys_addr); + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", page_block->virt_addr); + + page = virt_to_page(page_addr); + + //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p" + // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n", + // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr); + + for (j = 0 ; j < count; j++, page++, lut_addr++) + { + used_block->page_table[index + j].phys_addr = __pa(page_address(page)); + used_block->page_table[index + j].virt_addr = (unsigned long)page_address(page); + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", used_block->page_table[index + j].phys_addr); + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", used_block->page_table[index + j].virt_addr); + + *lut_addr = __pa(page_address(page)); + XGI_INC_PAGE_COUNT(page); + XGILockPage(page); + + if (temp) + { + XGI_INFO("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n", + __pa(page_address(page)), lut_addr, j, *lut_addr); + temp--; + } + } + + page_block = page_block->next; + page_count -= count; + index += count; + temp = 0; + } + + used_block->owner = owner; + list_add(&used_block->list, 
&xgi_pcie_heap->used_list); + +#if defined(__i386__) || defined(__x86_64__) + asm volatile ( "wbinvd" ::: "memory" ); +#else + mb(); +#endif + + /* Flush GART Table */ + bWriteReg(0xB03F, 0x40); + bWriteReg(0xB03F, 0x00); + + if (owner == PCIE_3D) + { + xgi_pcie_vertex_block = used_block; + } + + if (owner == PCIE_3D_CMDLIST) + { + xgi_pcie_cmdlist_block = used_block; + } + + if (owner == PCIE_3D_SCRATCHPAD) + { + xgi_pcie_scratchpad_block = used_block; + } + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n"); + return (used_block); + +fail: + xgi_pcie_block_stuff_free(used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + return NULL; +} + +static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t *info, unsigned long offset) +{ + struct list_head *free_list, *used_list; + xgi_pcie_block_t *used_block, *block = NULL; + xgi_pcie_block_t *prev, *next; + unsigned long upper, lower; + + used_list = xgi_pcie_heap->used_list.next; + while (used_list != &xgi_pcie_heap->used_list) + { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + if (block->offset == offset) + { + break; + } + used_list = used_list->next; + } + + if (used_list == &xgi_pcie_heap->used_list) + { + XGI_ERROR("can't find block: 0x%lx to free!\n", offset); + return (NULL); + } + + used_block = block; + XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n", + used_block, used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr); + + xgi_pcie_block_stuff_free(used_block); + + /* update xgi_pcie_heap */ + xgi_pcie_heap->max_freesize += used_block->size; + + prev = next = NULL; + upper = used_block->offset + used_block->size; + lower = used_block->offset; + + free_list = xgi_pcie_heap->free_list.next; + + while (free_list != &xgi_pcie_heap->free_list) + { + block = list_entry(free_list, struct xgi_pcie_block_s, list); + if (block->offset == upper) + { + next = block; + } + else if ((block->offset + block->size) == lower) 
+ { + prev = block; + } + free_list = free_list->next; + } + + XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); + list_del(&used_block->list); + + if (prev && next) + { + prev->size += (used_block->size + next->size); + list_del(&next->list); + XGI_INFO("free node 0x%p\n", next); + kmem_cache_free(xgi_pcie_cache_block, next); + kmem_cache_free(xgi_pcie_cache_block, used_block); + next = NULL; + used_block = NULL; + return (prev); + } + + if (prev) + { + prev->size += used_block->size; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + used_block = NULL; + return (prev); + } + + if (next) + { + next->size += used_block->size; + next->offset = used_block->offset; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + used_block = NULL; + return (next); + } + + used_block->bus_addr = 0; + used_block->hw_addr = 0; + used_block->page_count = 0; + used_block->page_order = 0; + list_add(&used_block->list, &xgi_pcie_heap->free_list); + XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + return (used_block); +} + +void xgi_pcie_alloc(xgi_info_t *info, unsigned long size, + enum PcieOwner owner, xgi_mem_alloc_t *alloc) +{ + xgi_pcie_block_t *block; + xgi_mem_pid_t *mempid_block; + + xgi_down(info->pcie_sem); + block = xgi_pcie_mem_alloc(info, size, owner); + xgi_up(info->pcie_sem); + + if (block == NULL) + { + alloc->location = INVALID; + alloc->size = 0; + alloc->bus_addr = 0; + alloc->hw_addr = 0; + XGI_ERROR("PCIE RAM allocation failed\n"); + } + else + { + XGI_INFO("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", + block->offset, block->bus_addr); + alloc->location = NON_LOCAL; + alloc->size = block->size; + alloc->bus_addr = block->bus_addr; + alloc->hw_addr = block->hw_addr; + + /* + manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE. + PCIE_3D request means a opengl process created. 
+ PCIE_3D_TEXTURE request means texture cannot alloc from fb. + */ + if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) + { + mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + if (!mempid_block) + XGI_ERROR("mempid_block alloc failed\n"); + mempid_block->location = NON_LOCAL; + if (owner == PCIE_3D) + mempid_block->bus_addr = 0xFFFFFFFF;/*xgi_pcie_vertex_block has the address*/ + else + mempid_block->bus_addr = alloc->bus_addr; + mempid_block->pid = alloc->pid; + + XGI_INFO("Memory ProcessID add one pcie block pid:%ld successfully! \n", mempid_block->pid); + list_add(&mempid_block->list, &xgi_mempid_list); + } + } +} + +void xgi_pcie_free(xgi_info_t *info, unsigned long bus_addr) +{ + xgi_pcie_block_t *block; + unsigned long offset = bus_addr - info->pcie.base; + xgi_mem_pid_t *mempid_block; + xgi_mem_pid_t *mempid_freeblock = NULL; + struct list_head *mempid_list; + char isvertex = 0; + int processcnt; + + if (xgi_pcie_vertex_block && xgi_pcie_vertex_block->bus_addr == bus_addr) + isvertex = 1; + + if (isvertex) + { + /*check is there any other process using vertex*/ + processcnt = 0; + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) + { + mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF) + { + ++processcnt; + } + mempid_list = mempid_list->next; + } + if (processcnt > 1) + { + return; + } + } + + xgi_down(info->pcie_sem); + block = xgi_pcie_mem_free(info, offset); + xgi_up(info->pcie_sem); + + if (block == NULL) + { + XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); + } + + if (isvertex) + xgi_pcie_vertex_block = NULL; + + /* manage mempid */ + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) + { + mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && 
mempid_block->bus_addr == bus_addr))) + { + mempid_freeblock = mempid_block; + break; + } + mempid_list = mempid_list->next; + } + if (mempid_freeblock) + { + list_del(&mempid_freeblock->list); + XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_freeblock->pid); + kfree(mempid_freeblock); + } +} + +/* + * given a bus address, fid the pcie mem block + * uses the bus address as the key. + */ +void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address) +{ + struct list_head *used_list; + xgi_pcie_block_t *block; + int i; + + used_list = xgi_pcie_heap->used_list.next; + + while (used_list != &xgi_pcie_heap->used_list) + { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + + if (block->bus_addr == address) + { + return block; + } + + if (block->page_table) + { + for (i = 0; i < block->page_count; i++) + { + unsigned long offset = block->bus_addr; + if ( (address >= offset) && (address < (offset + PAGE_SIZE))) + { + return block; + } + } + } + used_list = used_list->next; + } + + XGI_ERROR("could not find map for vm 0x%lx\n", address); + + return NULL; +} + +/* + address -- GE HW address + return -- CPU virtual address + + assume the CPU VAddr is continuous in not the same block +*/ +void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address) +{ + struct list_head *used_list; + xgi_pcie_block_t *block; + unsigned long offset_in_page; + unsigned long loc_in_pagetable; + void * ret; + + XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n"); + + used_list = xgi_pcie_heap->used_list.next; + XGI_INFO("Jong_05292006-used_list=%ul\n", used_list); + + offset_in_page = address & (PAGE_SIZE-1); + XGI_INFO("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", address, PAGE_SIZE-1, offset_in_page); + + while (used_list != &xgi_pcie_heap->used_list) + { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + XGI_INFO("Jong_05292006-block=0x%px\n", block); + 
XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", block->hw_addr); + XGI_INFO("Jong_05292006- block->size=%ul\n", block->size); + + if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size))) + { + loc_in_pagetable = (address - block->hw_addr) >> PAGE_SHIFT; + ret = (void*)(block->page_table[loc_in_pagetable].virt_addr + offset_in_page); + + XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT); + XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", loc_in_pagetable); + XGI_INFO("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", block->page_table[loc_in_pagetable].virt_addr); + XGI_INFO("Jong_05292006-offset_in_page=%d\n", offset_in_page); + XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", ret); + + return ret ; + } + else + { + XGI_INFO("Jong_05292006-used_list = used_list->next;\n"); + used_list = used_list->next; + } + } + + XGI_ERROR("could not find map for vm 0x%lx\n", address); + return NULL; +} + + +void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req) +{ + +} + +void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req) +{ +} + +/* + address -- GE hw address +*/ +void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address) +{ + unsigned long * virtaddr = 0; + if (address == 0) + { + XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n"); + return; + } + + virtaddr = (unsigned long *) xgi_find_pcie_virt(info, address); + + XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address); + XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr); + XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr); + if (virtaddr != NULL) + { + *virtaddr = 0x00f00fff; + } + + XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr); +} + diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h new file mode 100644 index 00000000..cd5f85b8 --- /dev/null +++ b/linux-core/xgi_pcie.h @@ -0,0 +1,73 @@ + +/**************************************************************************** + * Copyright (C) 
2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_PCIE_H_ +#define _XGI_PCIE_H_ + +#ifndef XGI_PCIE_ALLOC_MAX_ORDER +#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ +#endif + +typedef struct xgi_page_block_s { + struct xgi_page_block_s *next; + unsigned long phys_addr; + unsigned long virt_addr; + unsigned long page_count; + unsigned long page_order; +} xgi_page_block_t; + +typedef struct xgi_pcie_block_s { + struct list_head list; + unsigned long offset; /* block's offset in pcie memory, begin from 0 */ + unsigned long size; /* The block size. 
*/ + unsigned long bus_addr; /* CPU access address/bus address */ + unsigned long hw_addr; /* GE access address */ + + unsigned long page_count; + unsigned long page_order; + xgi_page_block_t *page_block; + xgi_pte_t *page_table; /* list of physical pages allocated */ + + atomic_t use_count; + enum PcieOwner owner; + unsigned long processID; +} xgi_pcie_block_t; + +typedef struct xgi_pcie_list_s { + xgi_pcie_block_t *head; + xgi_pcie_block_t *tail; +} xgi_pcie_list_t; + +typedef struct xgi_pcie_heap_s { + struct list_head free_list; + struct list_head used_list; + struct list_head sort_list; + unsigned long max_freesize; +} xgi_pcie_heap_t; + +#endif diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h new file mode 100644 index 00000000..18448139 --- /dev/null +++ b/linux-core/xgi_regs.h @@ -0,0 +1,410 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + + +#ifndef _XGI_REGS_H_ +#define _XGI_REGS_H_ + +#ifndef XGI_MMIO + #define XGI_MMIO 1 +#endif + +#if XGI_MMIO +#define OUTB(port, value) writeb(value, info->mmio.vbase + port) +#define INB(port) readb(info->mmio.vbase + port) +#define OUTW(port, value) writew(value, info->mmio.vbase + port) +#define INW(port) readw(info->mmio.vbase + port) +#define OUTDW(port, value) writel(value, info->mmio.vbase + port) +#define INDW(port) readl(info->mmio.vbase + port) +#else +#define OUTB(port, value) outb(value, port) +#define INB(port) inb(port) +#define OUTW(port, value) outw(value, port) +#define INW(port) inw(port) +#define OUTDW(port, value) outl(value, port) +#define INDW(port) inl(port) +#endif + +/* Hardware access functions */ +static inline void OUT3C5B(xgi_info_t *info, u8 index, u8 data) +{ + OUTB(0x3C4, index); + OUTB(0x3C5, data); +} + +static inline void OUT3X5B(xgi_info_t *info, u8 index, u8 data) +{ + OUTB(0x3D4, index); + OUTB(0x3D5, data); +} + +static inline void OUT3CFB(xgi_info_t *info, u8 index, u8 data) +{ + OUTB(0x3CE, index); + OUTB(0x3CF, data); +} + +static inline u8 IN3C5B(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3C4, index); + data = INB(0x3C5); + return data; +} + +static inline u8 IN3X5B(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3D4, index); + data = INB(0x3D5); + return data; +} + +static inline u8 IN3CFB(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3CE, index); + data = INB(0x3CF); + return data; +} + +static inline void OUT3C5W(xgi_info_t *info, u8 index, u16 data) +{ + OUTB(0x3C4, index); + OUTB(0x3C5, data); +} + +static inline void 
OUT3X5W(xgi_info_t *info, u8 index, u16 data) +{ + OUTB(0x3D4, index); + OUTB(0x3D5, data); +} + +static inline void OUT3CFW(xgi_info_t *info, u8 index, u8 data) +{ + OUTB(0x3CE, index); + OUTB(0x3CF, data); +} + +static inline u8 IN3C5W(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3C4, index); + data = INB(0x3C5); + return data; +} + +static inline u8 IN3X5W(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3D4, index); + data = INB(0x3D5); + return data; +} + +static inline u8 IN3CFW(xgi_info_t *info, u8 index) +{ + volatile u8 data=0; + OUTB(0x3CE, index); + data = INB(0x3CF); + return data; +} + +static inline u8 readAttr(xgi_info_t *info, u8 index) +{ + INB(0x3DA); /* flip-flop to index */ + OUTB(0x3C0, index); + return INB(0x3C1); +} + +static inline void writeAttr(xgi_info_t *info, u8 index, u8 value) +{ + INB(0x3DA); /* flip-flop to index */ + OUTB(0x3C0, index); + OUTB(0x3C0, value); +} + +/* + * Graphic engine register (2d/3d) acessing interface + */ +static inline void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data) +{ + /* Jong 05/25/2006 */ + XGI_INFO("Jong-WriteRegDWord()-Begin \n"); + XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", info->mmio.vbase); + XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr); + XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); + /* return; */ + + *(volatile u32*)(info->mmio.vbase + addr) = (data); + XGI_INFO("Jong-WriteRegDWord()-End \n"); +} + +static inline void WriteRegWord(xgi_info_t *info, u32 addr, u16 data) +{ + *(volatile u16*)(info->mmio.vbase + addr) = (data); +} + +static inline void WriteRegByte(xgi_info_t *info, u32 addr, u8 data) +{ + *(volatile u8*)(info->mmio.vbase + addr) = (data); +} + +static inline u32 ReadRegDWord(xgi_info_t *info, u32 addr) +{ + volatile u32 data; + data = *(volatile u32*)(info->mmio.vbase + addr); + return data; +} + +static inline u16 ReadRegWord(xgi_info_t *info, u32 addr) +{ + volatile u16 data; + data = *(volatile 
u16*)(info->mmio.vbase + addr); + return data; +} + +static inline u8 ReadRegByte(xgi_info_t *info, u32 addr) +{ + volatile u8 data; + data = *(volatile u8*)(info->mmio.vbase + addr); + return data; +} +#if 0 +extern void OUT3C5B(xgi_info_t *info, u8 index, u8 data); +extern void OUT3X5B(xgi_info_t *info, u8 index, u8 data); +extern void OUT3CFB(xgi_info_t *info, u8 index, u8 data); +extern u8 IN3C5B(xgi_info_t *info, u8 index); +extern u8 IN3X5B(xgi_info_t *info, u8 index); +extern u8 IN3CFB(xgi_info_t *info, u8 index); +extern void OUT3C5W(xgi_info_t *info, u8 index, u8 data); +extern void OUT3X5W(xgi_info_t *info, u8 index, u8 data); +extern void OUT3CFW(xgi_info_t *info, u8 index, u8 data); +extern u8 IN3C5W(xgi_info_t *info, u8 index); +extern u8 IN3X5W(xgi_info_t *info, u8 index); +extern u8 IN3CFW(xgi_info_t *info, u8 index); + +extern void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data); +extern void WriteRegWord(xgi_info_t *info, u32 addr, u16 data); +extern void WriteRegByte(xgi_info_t *info, u32 addr, u8 data); +extern u32 ReadRegDWord(xgi_info_t *info, u32 addr); +extern u16 ReadRegWord(xgi_info_t *info, u32 addr); +extern u8 ReadRegByte(xgi_info_t *info, u32 addr); + +extern void EnableProtect(); +extern void DisableProtect(); +#endif + +#define Out(port, data) OUTB(port, data) +#define bOut(port, data) OUTB(port, data) +#define wOut(port, data) OUTW(port, data) +#define dwOut(port, data) OUTDW(port, data) + +#define Out3x5(index, data) OUT3X5B(info, index, data) +#define bOut3x5(index, data) OUT3X5B(info, index, data) +#define wOut3x5(index, data) OUT3X5W(info, index, data) + +#define Out3c5(index, data) OUT3C5B(info, index, data) +#define bOut3c5(index, data) OUT3C5B(info, index, data) +#define wOut3c5(index, data) OUT3C5W(info, index, data) + +#define Out3cf(index, data) OUT3CFB(info, index, data) +#define bOut3cf(index, data) OUT3CFB(info, index, data) +#define wOut3cf(index, data) OUT3CFW(info, index, data) + +#define In(port) INB(port) 
+#define bIn(port) INB(port) +#define wIn(port) INW(port) +#define dwIn(port) INDW(port) + +#define In3x5(index) IN3X5B(info, index) +#define bIn3x5(index) IN3X5B(info, index) +#define wIn3x5(index) IN3X5W(info, index) + +#define In3c5(index) IN3C5B(info, index) +#define bIn3c5(index) IN3C5B(info, index) +#define wIn3c5(index) IN3C5W(info, index) + +#define In3cf(index) IN3CFB(info, index) +#define bIn3cf(index) IN3CFB(info, index) +#define wIn3cf(index) IN3CFW(info, index) + +#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data) +#define wWriteReg(addr, data) WriteRegWord(info, addr, data) +#define bWriteReg(addr, data) WriteRegByte(info, addr, data) +#define dwReadReg(addr) ReadRegDWord(info, addr) +#define wReadReg(addr) ReadRegWord(info, addr) +#define bReadReg(addr) ReadRegByte(info, addr) + +static inline void xgi_protect_all(xgi_info_t *info) +{ + OUTB(0x3C4, 0x11); + OUTB(0x3C5, 0x92); +} + +static inline void xgi_unprotect_all(xgi_info_t *info) +{ + OUTB(0x3C4, 0x11); + OUTB(0x3C5, 0x92); +} + +static inline void xgi_enable_mmio(xgi_info_t *info) +{ + u8 protect = 0; + + /* Unprotect registers */ + outb(0x11, 0x3C4); + protect = inb(0x3C5); + outb(0x92, 0x3C5); + + outb(0x3A, 0x3D4); + outb(inb(0x3D5) | 0x20, 0x3D5); + + /* Enable MMIO */ + outb(0x39, 0x3D4); + outb(inb(0x3D5) | 0x01, 0x3D5); + + OUTB(0x3C4, 0x11); + OUTB(0x3C5, protect); +} + +static inline void xgi_disable_mmio(xgi_info_t *info) +{ + u8 protect = 0; + + /* unprotect registers */ + OUTB(0x3C4, 0x11); + protect = INB(0x3C5); + OUTB(0x3C5, 0x92); + + /* Disable MMIO access */ + OUTB(0x3D4, 0x39); + OUTB(0x3D5, INB(0x3D5) & 0xFE); + + /* Protect registers */ + outb(0x11, 0x3C4); + outb(protect, 0x3C5); +} + +static inline void xgi_enable_ge(xgi_info_t *info) +{ + unsigned char bOld3cf2a = 0; + int wait = 0; + + // Enable GE + OUTW(0x3C4, 0x9211); + + // Save and close dynamic gating + bOld3cf2a = bIn3cf(0x2a); + bOut3cf(0x2a, bOld3cf2a & 0xfe); + + // Reset both 3D and 2D engine + 
bOut3x5(0x36, 0x84); + wait = 10; + while (wait--) + { + bIn(0x36); + } + bOut3x5(0x36, 0x94); + wait = 10; + while (wait--) + { + bIn(0x36); + } + bOut3x5(0x36, 0x84); + wait = 10; + while (wait--) + { + bIn(0x36); + } + // Enable 2D engine only + bOut3x5(0x36, 0x80); + + // Enable 2D+3D engine + bOut3x5(0x36, 0x84); + + // Restore dynamic gating + bOut3cf(0x2a, bOld3cf2a); +} + +static inline void xgi_disable_ge(xgi_info_t *info) +{ + int wait = 0; + + // Reset both 3D and 2D engine + bOut3x5(0x36, 0x84); + + wait = 10; + while (wait--) + { + bIn(0x36); + } + bOut3x5(0x36, 0x94); + + wait = 10; + while (wait--) + { + bIn(0x36); + } + bOut3x5(0x36, 0x84); + + wait = 10; + while (wait--) + { + bIn(0x36); + } + + // Disable 2D engine only + bOut3x5(0x36, 0); +} + +static inline void xgi_enable_dvi_interrupt(xgi_info_t *info) +{ + Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 + Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 + Out3cf(0x39, In3cf(0x39) | 0x02); +} +static inline void xgi_disable_dvi_interrupt(xgi_info_t *info) +{ + Out3cf(0x39,In3cf(0x39) & ~0x02); +} + +static inline void xgi_enable_crt1_interrupt(xgi_info_t *info) +{ + Out3cf(0x3d,In3cf(0x3d) | 0x04); + Out3cf(0x3d,In3cf(0x3d) & ~0x04); + Out3cf(0x3d,In3cf(0x3d) | 0x08); +} + +static inline void xgi_disable_crt1_interrupt(xgi_info_t *info) +{ + Out3cf(0x3d,In3cf(0x3d) & ~0x08); +} + +#endif + diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h new file mode 100644 index 00000000..24cb8f3c --- /dev/null +++ b/linux-core/xgi_types.h @@ -0,0 +1,68 @@ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#ifndef _XGI_TYPES_H_ +#define _XGI_TYPES_H_ + +/**************************************************************************** + * Typedefs * + ***************************************************************************/ + +typedef unsigned char V8; /* "void": enumerated or multiple fields */ +typedef unsigned short V16; /* "void": enumerated or multiple fields */ +typedef unsigned char U8; /* 0 to 255 */ +typedef unsigned short U16; /* 0 to 65535 */ +typedef signed char S8; /* -128 to 127 */ +typedef signed short S16; /* -32768 to 32767 */ +typedef float F32; /* IEEE Single Precision (S1E8M23) */ +typedef double F64; /* IEEE Double Precision (S1E11M52) */ +typedef unsigned long BOOL; +/* + * mainly for 64-bit linux, where long is 64 bits + * and win9x, where int is 16 bit. + */ +#if defined(vxworks) +typedef unsigned int V32; /* "void": enumerated or multiple fields */ +typedef unsigned int U32; /* 0 to 4294967295 */ +typedef signed int S32; /* -2147483648 to 2147483647 */ +#else +typedef unsigned long V32; /* "void": enumerated or multiple fields */ +typedef unsigned long U32; /* 0 to 4294967295 */ +typedef signed long S32; /* -2147483648 to 2147483647 */ +#endif + +#ifndef TRUE +#define TRUE 1UL +#endif + +#ifndef FALSE +#define FALSE 0UL +#endif + +#endif + -- cgit v1.2.3 From 434657a2582362367ba2a94f827511252001368f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:10:30 -0700 Subject: dos2unix and Lindent --- linux-core/xgi_cmdlist.c | 705 +++++----- linux-core/xgi_cmdlist.h | 155 ++- linux-core/xgi_drv.c | 3174 +++++++++++++++++++++++----------------------- linux-core/xgi_drv.h | 728 +++++------ linux-core/xgi_fb.c | 1019 +++++++-------- linux-core/xgi_fb.h | 141 +- linux-core/xgi_linux.h | 1187 +++++++++-------- linux-core/xgi_misc.c | 1287 +++++++++---------- linux-core/xgi_misc.h | 96 +- linux-core/xgi_pcie.c | 2091 +++++++++++++++--------------- 
linux-core/xgi_pcie.h | 146 +-- linux-core/xgi_regs.h | 814 ++++++------ linux-core/xgi_types.h | 135 +- 13 files changed, 5765 insertions(+), 5913 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 024b021c..e00ea228 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -1,348 +1,357 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - - -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_regs.h" -#include "xgi_misc.h" -#include "xgi_cmdlist.h" - - - -U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] = -{ - 0x10000000, // 3D Type Begin, Invalid - 0x80000004, // Length = 4; - 0x00000000, - 0x00000000 -}; - -U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = -{ - FLUSH_2D, - FLUSH_2D, - FLUSH_2D, - FLUSH_2D -}; - -xgi_cmdring_info_t s_cmdring; - -static void addFlush2D(xgi_info_t *info); -static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo); -static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter); -static void xgi_cmdlist_reset(void); - -int xgi_cmdlist_initialize(xgi_info_t *info, U32 size) -{ - //xgi_mem_req_t mem_req; - xgi_mem_alloc_t mem_alloc; - - //mem_req.size = size; - - xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc); - - if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) - { - return -1; - } - - s_cmdring._cmdRingSize = mem_alloc.size; - s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; - s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr; - s_cmdring._lastBatchStartAddr = 0; - s_cmdring._cmdRingOffset = 0; - - return 1; -} - -void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t *pCmdInfo) -{ - U32 beginPort; - /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ - - /* Jong 05/25/2006 */ - /* return; */ - - beginPort = getCurBatchBeginPort(pCmdInfo); - XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); - - /* Jong 05/25/2006 */ - /* return; */ - - if (s_cmdring._lastBatchStartAddr == 0) - { - U32 portOffset; - - /* Jong 06/13/2006; remove marked for system hang test */ - /* xgi_waitfor_pci_idle(info); */ - - /* Jong 06132006; BASE_3D_ENG=0x2800 */ - /* beginPort: 2D: 0x30 */ - portOffset = BASE_3D_ENG + beginPort; - - // Enable PCI Trigger Mode - XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); - - /* Jong 05/25/2006 */ - /* return; */ - - 
/* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */ - XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", M2REG_AUTO_LINK_SETTING_ADDRESS); - XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", M2REG_CLEAR_COUNTERS_MASK); - XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); - XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", M2REG_PCI_TRIGGER_MODE_MASK); - - /* Jong 06/14/2006; 0x400001a */ - XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK); - dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | - M2REG_CLEAR_COUNTERS_MASK | - 0x08 | - M2REG_PCI_TRIGGER_MODE_MASK); - - /* Jong 05/25/2006 */ - XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); - /* return; */ /* OK */ - - /* Jong 06/14/2006; 0x400000a */ - XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK); - dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | - 0x08 | - M2REG_PCI_TRIGGER_MODE_MASK); - - // Send PCI begin command - XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); - /* return; */ - - XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", portOffset); - XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); - - /* beginPort = 48; */ - /* 0xc100000 */ - dwWriteReg(portOffset, (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); - XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort<<22)); - XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); - XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", pCmdInfo->_curDebugID); - XGI_INFO("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + 
pCmdInfo->_curDebugID=0x%lx \n", (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); - XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); - /* return; */ /* OK */ - - /* 0x80000024 */ - dwWriteReg(portOffset+4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", BEGIN_LINK_ENABLE_MASK); - XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", pCmdInfo->_firstSize); - XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); - - /* 0x1010000 */ - dwWriteReg(portOffset+8, (pCmdInfo->_firstBeginAddr >> 4)); - XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n", pCmdInfo->_firstBeginAddr); - XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", (pCmdInfo->_firstBeginAddr >> 4)); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); - - /* Jong 06/13/2006 */ - xgi_dump_register(info); - - /* Jong 06/12/2006; system hang; marked for test */ - dwWriteReg(portOffset+12, 0); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); - - /* Jong 06/13/2006; remove marked for system hang test */ - /* xgi_waitfor_pci_idle(info); */ - } - else - { - XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); - U32 *lastBatchVirtAddr; - - /* Jong 05/25/2006 */ - /* return; */ - - if (pCmdInfo->_firstBeginType == BTYPE_3D) - { - addFlush2D(info); - } - - lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); - - lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; - lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; - lastBatchVirtAddr[3] = 0; - //barrier(); - lastBatchVirtAddr[0] = (beginPort<<22) + (BEGIN_VALID_MASK) + (0xffff & pCmdInfo->_curDebugID); - - /* Jong 06/12/2006; system hang; marked for test */ - triggerHWCommandList(info, pCmdInfo->_beginCount); - - XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr 
!= 0 - End\n"); - } - - s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; - XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); -} - - -/* - state: 0 - console - 1 - graphic - 2 - fb - 3 - logout -*/ -void xgi_state_change(xgi_info_t *info, xgi_state_info_t *pStateInfo) -{ -#define STATE_CONSOLE 0 -#define STATE_GRAPHIC 1 -#define STATE_FBTERM 2 -#define STATE_LOGOUT 3 -#define STATE_REBOOT 4 -#define STATE_SHUTDOWN 5 - - if ((pStateInfo->_fromState == STATE_GRAPHIC) - && (pStateInfo->_toState == STATE_CONSOLE)) - { - XGI_INFO("[kd] I see, now is to leaveVT\n"); - // stop to received batch - } - else if ((pStateInfo->_fromState == STATE_CONSOLE) - && (pStateInfo->_toState == STATE_GRAPHIC)) - { - XGI_INFO("[kd] I see, now is to enterVT\n"); - xgi_cmdlist_reset(); - } - else if ((pStateInfo->_fromState == STATE_GRAPHIC) - && ( (pStateInfo->_toState == STATE_LOGOUT) - ||(pStateInfo->_toState == STATE_REBOOT) - ||(pStateInfo->_toState == STATE_SHUTDOWN))) - { - XGI_INFO("[kd] I see, not is to exit from X\n"); - // stop to received batch - } - else - { - XGI_ERROR("[kd] Should not happen\n"); - } - -} - -void xgi_cmdlist_reset(void) -{ - s_cmdring._lastBatchStartAddr = 0; - s_cmdring._cmdRingOffset = 0; -} - -void xgi_cmdlist_cleanup(xgi_info_t *info) -{ - if (s_cmdring._cmdRingBuffer != 0) - { - xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); - s_cmdring._cmdRingBuffer = 0; - s_cmdring._cmdRingOffset = 0; - s_cmdring._cmdRingSize = 0; - } -} - -static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter) -{ - static U32 s_triggerID = 1; - - //Fix me, currently we just trigger one time - while (triggerCounter--) - { - dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, - 0x05000000 + (0xffff & s_triggerID++)); - // xgi_waitfor_pci_idle(info); - } -} - -static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo) -{ - // Convert the batch type to begin port ID - switch(pCmdInfo->_firstBeginType) - { - case BTYPE_2D: - return 0x30; - case BTYPE_3D: - 
return 0x40; - case BTYPE_FLIP: - return 0x50; - case BTYPE_CTRL: - return 0x20; - default: - //ASSERT(0); - return 0xff; - } -} - -static void addFlush2D(xgi_info_t *info) -{ - U32 *flushBatchVirtAddr; - U32 flushBatchHWAddr; - - U32 *lastBatchVirtAddr; - - /* check buf is large enough to contain a new flush batch */ - if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) - { - s_cmdring._cmdRingOffset = 0; - } - - flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; - flushBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, flushBatchHWAddr); - - /* not using memcpy for I assume the address is discrete */ - *(flushBatchVirtAddr + 0) = 0x10000000; - *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */ - *(flushBatchVirtAddr + 2) = 0x00000000; - *(flushBatchVirtAddr + 3) = 0x00000000; - *(flushBatchVirtAddr + 4) = FLUSH_2D; - *(flushBatchVirtAddr + 5) = FLUSH_2D; - *(flushBatchVirtAddr + 6) = FLUSH_2D; - *(flushBatchVirtAddr + 7) = FLUSH_2D; - - // ASSERT(s_cmdring._lastBatchStartAddr != NULL); - lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); - - lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; - lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; - lastBatchVirtAddr[3] = 0; - - //barrier(); - - // BTYPE_CTRL & NO debugID - lastBatchVirtAddr[0] = (0x20<<22) + (BEGIN_VALID_MASK); - - triggerHWCommandList(info, 1); - - s_cmdring._cmdRingOffset += 0x20; - s_cmdring._lastBatchStartAddr = flushBatchHWAddr; -} + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] = { + 0x10000000, // 3D Type Begin, Invalid + 0x80000004, // Length = 4; + 0x00000000, + 0x00000000 +}; + +U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = { + FLUSH_2D, + FLUSH_2D, + FLUSH_2D, + FLUSH_2D +}; + +xgi_cmdring_info_t s_cmdring; + +static void addFlush2D(xgi_info_t * info); +static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo); +static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter); +static void xgi_cmdlist_reset(void); + +int xgi_cmdlist_initialize(xgi_info_t * info, U32 size) +{ + //xgi_mem_req_t mem_req; + xgi_mem_alloc_t mem_alloc; + + //mem_req.size = size; + + xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc); + + if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) { + return -1; + } + + s_cmdring._cmdRingSize = mem_alloc.size; + s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; + s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr; + s_cmdring._lastBatchStartAddr = 0; + s_cmdring._cmdRingOffset = 0; + + return 1; +} + +void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo) +{ + U32 beginPort; + /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ + + /* Jong 05/25/2006 */ + /* return; */ + + beginPort = getCurBatchBeginPort(pCmdInfo); + XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); + + /* Jong 05/25/2006 */ + /* return; */ + + if (s_cmdring._lastBatchStartAddr == 0) { + U32 portOffset; + + /* Jong 06/13/2006; remove marked for system hang test */ + /* xgi_waitfor_pci_idle(info); */ + + /* Jong 06132006; BASE_3D_ENG=0x2800 */ + /* beginPort: 2D: 0x30 */ + portOffset = BASE_3D_ENG + beginPort; + + // Enable PCI Trigger Mode + XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); + + /* Jong 05/25/2006 */ + /* return; */ + + /* 
Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */ + XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", + M2REG_AUTO_LINK_SETTING_ADDRESS); + XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", + M2REG_CLEAR_COUNTERS_MASK); + XGI_INFO + ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); + XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", + M2REG_PCI_TRIGGER_MODE_MASK); + + /* Jong 06/14/2006; 0x400001a */ + XGI_INFO + ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | + M2REG_CLEAR_COUNTERS_MASK | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | + M2REG_CLEAR_COUNTERS_MASK | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + /* Jong 05/25/2006 */ + XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); + /* return; *//* OK */ + + /* Jong 06/14/2006; 0x400000a */ + XGI_INFO + ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + // Send PCI begin command + XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); + /* return; */ + + XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", + portOffset); + XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); + + /* beginPort = 48; */ + /* 0xc100000 */ + dwWriteReg(portOffset, + (beginPort << 22) + (BEGIN_VALID_MASK) + + pCmdInfo->_curDebugID); + XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort << 22)); + XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); + XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", + pCmdInfo->_curDebugID); + XGI_INFO + ("Jong- 
(beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n", + (beginPort << 22) + (BEGIN_VALID_MASK) + + pCmdInfo->_curDebugID); + XGI_INFO + ("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); + /* return; *//* OK */ + + /* 0x80000024 */ + dwWriteReg(portOffset + 4, + BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); + XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", + BEGIN_LINK_ENABLE_MASK); + XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", + pCmdInfo->_firstSize); + XGI_INFO + ("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", + BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); + + /* 0x1010000 */ + dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4)); + XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n", + pCmdInfo->_firstBeginAddr); + XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", + (pCmdInfo->_firstBeginAddr >> 4)); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); + + /* Jong 06/13/2006 */ + xgi_dump_register(info); + + /* Jong 06/12/2006; system hang; marked for test */ + dwWriteReg(portOffset + 12, 0); + XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); + + /* Jong 06/13/2006; remove marked for system hang test */ + /* xgi_waitfor_pci_idle(info); */ + } else { + XGI_INFO + ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); + U32 *lastBatchVirtAddr; + + /* Jong 05/25/2006 */ + /* return; */ + + if (pCmdInfo->_firstBeginType == BTYPE_3D) { + addFlush2D(info); + } + + lastBatchVirtAddr = + (U32 *) xgi_find_pcie_virt(info, + s_cmdring._lastBatchStartAddr); + + lastBatchVirtAddr[1] = + BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; + lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; + lastBatchVirtAddr[3] = 0; + //barrier(); + lastBatchVirtAddr[0] = + (beginPort << 22) + (BEGIN_VALID_MASK) + + (0xffff & pCmdInfo->_curDebugID); + + /* Jong 06/12/2006; system hang; marked for test */ + triggerHWCommandList(info, 
pCmdInfo->_beginCount); + + XGI_INFO + ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); + } + + s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; + XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); +} + +/* + state: 0 - console + 1 - graphic + 2 - fb + 3 - logout +*/ +void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo) +{ +#define STATE_CONSOLE 0 +#define STATE_GRAPHIC 1 +#define STATE_FBTERM 2 +#define STATE_LOGOUT 3 +#define STATE_REBOOT 4 +#define STATE_SHUTDOWN 5 + + if ((pStateInfo->_fromState == STATE_GRAPHIC) + && (pStateInfo->_toState == STATE_CONSOLE)) { + XGI_INFO("[kd] I see, now is to leaveVT\n"); + // stop to received batch + } else if ((pStateInfo->_fromState == STATE_CONSOLE) + && (pStateInfo->_toState == STATE_GRAPHIC)) { + XGI_INFO("[kd] I see, now is to enterVT\n"); + xgi_cmdlist_reset(); + } else if ((pStateInfo->_fromState == STATE_GRAPHIC) + && ((pStateInfo->_toState == STATE_LOGOUT) + || (pStateInfo->_toState == STATE_REBOOT) + || (pStateInfo->_toState == STATE_SHUTDOWN))) { + XGI_INFO("[kd] I see, not is to exit from X\n"); + // stop to received batch + } else { + XGI_ERROR("[kd] Should not happen\n"); + } + +} + +void xgi_cmdlist_reset(void) +{ + s_cmdring._lastBatchStartAddr = 0; + s_cmdring._cmdRingOffset = 0; +} + +void xgi_cmdlist_cleanup(xgi_info_t * info) +{ + if (s_cmdring._cmdRingBuffer != 0) { + xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); + s_cmdring._cmdRingBuffer = 0; + s_cmdring._cmdRingOffset = 0; + s_cmdring._cmdRingSize = 0; + } +} + +static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter) +{ + static U32 s_triggerID = 1; + + //Fix me, currently we just trigger one time + while (triggerCounter--) { + dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, + 0x05000000 + (0xffff & s_triggerID++)); + // xgi_waitfor_pci_idle(info); + } +} + +static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo) +{ + // Convert the batch type to begin port ID + switch 
(pCmdInfo->_firstBeginType) { + case BTYPE_2D: + return 0x30; + case BTYPE_3D: + return 0x40; + case BTYPE_FLIP: + return 0x50; + case BTYPE_CTRL: + return 0x20; + default: + //ASSERT(0); + return 0xff; + } +} + +static void addFlush2D(xgi_info_t * info) +{ + U32 *flushBatchVirtAddr; + U32 flushBatchHWAddr; + + U32 *lastBatchVirtAddr; + + /* check buf is large enough to contain a new flush batch */ + if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) { + s_cmdring._cmdRingOffset = 0; + } + + flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; + flushBatchVirtAddr = (U32 *) xgi_find_pcie_virt(info, flushBatchHWAddr); + + /* not using memcpy for I assume the address is discrete */ + *(flushBatchVirtAddr + 0) = 0x10000000; + *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */ + *(flushBatchVirtAddr + 2) = 0x00000000; + *(flushBatchVirtAddr + 3) = 0x00000000; + *(flushBatchVirtAddr + 4) = FLUSH_2D; + *(flushBatchVirtAddr + 5) = FLUSH_2D; + *(flushBatchVirtAddr + 6) = FLUSH_2D; + *(flushBatchVirtAddr + 7) = FLUSH_2D; + + // ASSERT(s_cmdring._lastBatchStartAddr != NULL); + lastBatchVirtAddr = + (U32 *) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + + lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; + lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; + lastBatchVirtAddr[3] = 0; + + //barrier(); + + // BTYPE_CTRL & NO debugID + lastBatchVirtAddr[0] = (0x20 << 22) + (BEGIN_VALID_MASK); + + triggerHWCommandList(info, 1); + + s_cmdring._cmdRingOffset += 0x20; + s_cmdring._lastBatchStartAddr = flushBatchHWAddr; +} diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 1b0c4965..5fe1de71 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -1,79 +1,76 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - -#ifndef _XGI_CMDLIST_H_ -#define _XGI_CMDLIST_H_ - -#define ONE_BIT_MASK 0x1 -#define TWENTY_BIT_MASK 0xfffff -#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) -#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK -#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) -#define BASE_3D_ENG 0x2800 -#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10 -#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) -#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) -#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) -#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) -#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 - -typedef enum -{ - FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK, - FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK, - FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK -}FLUSH_CODE; - -typedef enum -{ - AGPCMDLIST_SCRATCH_SIZE = 0x100, - AGPCMDLIST_BEGIN_SIZE = 0x004, - AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004, - AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c, - AGPCMDLIST_FLUSH_CMD_LEN = 0x004, - AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE -}CMD_SIZE; - -typedef struct xgi_cmdring_info_s -{ - U32 _cmdRingSize; - U32 _cmdRingBuffer; - U32 _cmdRingBusAddr; - U32 _lastBatchStartAddr; - U32 _cmdRingOffset; -}xgi_cmdring_info_t; - -extern int xgi_cmdlist_initialize(xgi_info_t *info, U32 size); - -extern void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t * pCmdInfo); - -extern void xgi_state_change(xgi_info_t *info, xgi_state_info_t * pStateInfo); - -extern void xgi_cmdlist_cleanup(xgi_info_t *info); - -#endif /* _XGI_CMDLIST_H_ */ + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#ifndef _XGI_CMDLIST_H_ +#define _XGI_CMDLIST_H_ + +#define ONE_BIT_MASK 0x1 +#define TWENTY_BIT_MASK 0xfffff +#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) +#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK +#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) +#define BASE_3D_ENG 0x2800 +#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10 +#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) +#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) +#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) +#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) +#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 + +typedef enum { + FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK, + FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK, + FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK +} FLUSH_CODE; + +typedef enum { + AGPCMDLIST_SCRATCH_SIZE = 0x100, + AGPCMDLIST_BEGIN_SIZE = 0x004, + AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004, + AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c, + AGPCMDLIST_FLUSH_CMD_LEN = 0x004, + AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE +} CMD_SIZE; + +typedef struct xgi_cmdring_info_s { + U32 _cmdRingSize; + U32 _cmdRingBuffer; + U32 _cmdRingBusAddr; + U32 _lastBatchStartAddr; + U32 _cmdRingOffset; +} xgi_cmdring_info_t; + +extern int xgi_cmdlist_initialize(xgi_info_t * info, U32 size); + +extern void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo); + +extern void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo); + +extern void xgi_cmdlist_cleanup(xgi_info_t * info); + +#endif /* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 5e80d417..0c37d00e 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -1,1610 +1,1564 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_regs.h" -#include "xgi_pcie.h" -#include "xgi_misc.h" -#include "xgi_cmdlist.h" - -/* for debug */ -static int xgi_temp = 1; -/* - * global parameters - */ -static struct xgi_dev { - u16 vendor; - u16 device; - const char *name; -} xgidev_list[] = { - {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, - {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, - {0, 0, NULL} -}; - -int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. 
*/ - -static int xgi_num_devices = 0; - -xgi_info_t xgi_devices[XGI_MAX_DEVICES]; - -#if defined(XGI_PM_SUPPORT_APM) -static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; -#endif - -/* add one for the control device */ -xgi_info_t xgi_ctl_device; -wait_queue_head_t xgi_ctl_waitqueue; - -#ifdef CONFIG_PROC_FS -struct proc_dir_entry *proc_xgi; -#endif - -#ifdef CONFIG_DEVFS_FS -devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; -#endif - -struct list_head xgi_mempid_list; - -/* xgi_ functions.. do not take a state device parameter */ -static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info); -static void xgi_proc_create(void); -static void xgi_proc_remove_all(struct proc_dir_entry *); -static void xgi_proc_remove(void); - -/* xgi_kern_ functions, interfaces used by linux kernel */ -int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *); - -unsigned int xgi_kern_poll(struct file *, poll_table *); -int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long); -int xgi_kern_mmap(struct file *, struct vm_area_struct *); -int xgi_kern_open(struct inode *, struct file *); -int xgi_kern_release(struct inode *inode, struct file *filp); - -void xgi_kern_vma_open(struct vm_area_struct *vma); -void xgi_kern_vma_release(struct vm_area_struct *vma); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int *type); -#else -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int write_access); -#endif - -int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *); - -int xgi_kern_ctl_open(struct inode *, struct file *); -int xgi_kern_ctl_close(struct inode *, struct file *); -unsigned int 
xgi_kern_ctl_poll(struct file *, poll_table *); - -void xgi_kern_isr_bh(unsigned long); -irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); - -static void xgi_lock_init(xgi_info_t *info); - -#if defined(XGI_PM_SUPPORT_ACPI) -int xgi_kern_acpi_standby(struct pci_dev *, u32); -int xgi_kern_acpi_resume(struct pci_dev *); -#endif - -/* - * verify access to pci config space wasn't disabled behind our back - * unfortunately, XFree86 enables/disables memory access in pci config space at - * various times (such as restoring initial pci config space settings during vt - * switches or when doing mulicard). As a result, all of our register accesses - * are garbage at this point. add a check to see if access was disabled and - * reenable any such access. - */ -#define XGI_CHECK_PCI_CONFIG(xgi) \ - xgi_check_pci_config(xgi, __LINE__) - -static inline void xgi_check_pci_config(xgi_info_t *info, int line) -{ - unsigned short cmd, flag = 0; - - // don't do this on the control device, only the actual devices - if (info->flags & XGI_FLAG_CONTROL) - return; - - pci_read_config_word(info->dev, PCI_COMMAND, &cmd); - if (!(cmd & PCI_COMMAND_MASTER)) - { - XGI_INFO("restoring bus mastering! (%d)\n", line); - cmd |= PCI_COMMAND_MASTER; - flag = 1; - } - - if (!(cmd & PCI_COMMAND_MEMORY)) - { - XGI_INFO("restoring MEM access! 
(%d)\n", line); - cmd |= PCI_COMMAND_MEMORY; - flag = 1; - } - - if (flag) - pci_write_config_word(info->dev, PCI_COMMAND, cmd); -} - -static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info) -{ - return 1; -} - -/* - * struct pci_device_id { - * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID - * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID - * unsigned int class, class_mask; // (class,subclass,prog-if) triplet - * unsigned long driver_data; // Data private to the driver - * }; - */ - -static struct pci_device_id xgi_dev_table[] = { - { - .vendor = PCI_VENDOR_ID_XGI, - .device = PCI_ANY_ID, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = (PCI_CLASS_DISPLAY_VGA << 8), - .class_mask = ~0, - }, - { } -}; - -/* - * #define MODULE_DEVICE_TABLE(type,name) \ - * MODULE_GENERIC_TABLE(type##_device,name) - */ - MODULE_DEVICE_TABLE(pci, xgi_dev_table); - -/* - * struct pci_driver { - * struct list_head node; - * char *name; - * const struct pci_device_id *id_table; // NULL if wants all devices - * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted - * void (*remove)(struct pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver) - * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context - * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended - * int (*resume)(struct pci_dev *dev); // Device woken up - * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event - * }; - */ -static struct pci_driver xgi_pci_driver = { - .name = "xgi", - .id_table = xgi_dev_table, - .probe = xgi_kern_probe, -#if defined(XGI_SUPPORT_ACPI) - .suspend = xgi_kern_acpi_standby, - .resume = xgi_kern_acpi_resume, -#endif -}; - -/* - * find xgi devices and set initial state - */ -int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) -{ - xgi_info_t *info; - - if ((dev->vendor != PCI_VENDOR_ID_XGI) - || 
(dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) - { - return -1; - } - - if (xgi_num_devices == XGI_MAX_DEVICES) - { - XGI_INFO("maximum device number (%d) reached!\n", xgi_num_devices); - return -1; - } - - /* enable io, mem, and bus-mastering in pci config space */ - if (pci_enable_device(dev) != 0) - { - XGI_INFO("pci_enable_device failed, aborting\n"); - return -1; - } - - XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices); - - pci_set_master(dev); - - info = &xgi_devices[xgi_num_devices]; - info->dev = dev; - info->vendor_id = dev->vendor; - info->device_id = dev->device; - info->bus = dev->bus->number; - info->slot = PCI_SLOT((dev)->devfn); - - xgi_lock_init(info); - - info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1); - info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1); - - /* check IO region */ - if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) - { - XGI_ERROR("cannot reserve MMIO memory\n"); - goto error_disable_dev; - } - - XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); - XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); - - info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base, - info->mmio.size); - if (!info->mmio.vbase) - { - release_mem_region(info->mmio.base, info->mmio.size); - XGI_ERROR("info->mmio.vbase failed\n"); - goto error_disable_dev; - } - xgi_enable_mmio(info); - - //xgi_enable_ge(info); - - XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase); - - info->fb.base = XGI_PCI_RESOURCE_START(dev, 0); - info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0); - - XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base); - XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); - - info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024; - XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); - - /* check frame buffer region - if (!request_mem_region(info->fb.base, info->fb.size, "xgi")) - { - release_mem_region(info->mmio.base, info->mmio.size); - XGI_ERROR("cannot reserve frame buffer memory\n"); - goto 
error_disable_dev; - } - - - info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base, - info->fb.size); - - if (!info->fb.vbase) - { - release_mem_region(info->mmio.base, info->mmio.size); - release_mem_region(info->fb.base, info->fb.size); - XGI_ERROR("info->fb.vbase failed\n"); - goto error_disable_dev; - } - */ - info->fb.vbase = NULL; - XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); - - info->irq = dev->irq; - - /* check common error condition */ - if (info->irq == 0) - { - XGI_ERROR("Can't find an IRQ for your XGI card! \n"); - goto error_zero_dev; - } - XGI_INFO("info->irq: %lx \n", info->irq); - - //xgi_enable_dvi_interrupt(info); - - /* sanity check the IO apertures */ - if ((info->mmio.base == 0) || (info->mmio.size == 0) - || (info->fb.base == 0) || (info->fb.size == 0)) - { - XGI_ERROR("The IO regions for your XGI card are invalid.\n"); - - if ((info->mmio.base == 0) || (info->mmio.size == 0)) - { - XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n", - info->mmio.base, - info->mmio.size); - } - - if ((info->fb.base == 0) || (info->fb.size == 0)) - { - XGI_ERROR("frame buffer appears to be wrong: 0x%lx 0x%lx\n", - info->fb.base, - info->fb.size); - } - - goto error_zero_dev; - } - - //xgi_num_devices++; - - return 0; - -error_zero_dev: - release_mem_region(info->fb.base, info->fb.size); - release_mem_region(info->mmio.base, info->mmio.size); - -error_disable_dev: - pci_disable_device(dev); - return -1; - -} - -/* - * vma operations... - * this is only called when the vmas are duplicated. this - * appears to only happen when the process is cloned to create - * a new process, and not when the process is threaded. - * - * increment the usage count for the physical pages, so when - * this clone unmaps the mappings, the pages are not - * deallocated under the original process. 
- */ -struct vm_operations_struct xgi_vm_ops = { - .open = xgi_kern_vma_open, - .close = xgi_kern_vma_release, - .nopage = xgi_kern_vma_nopage, -}; - -void xgi_kern_vma_open(struct vm_area_struct *vma) -{ - XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma)); - - if (XGI_VMA_PRIVATE(vma)) - { - xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); - XGI_ATOMIC_INC(block->use_count); - } -} - -void xgi_kern_vma_release(struct vm_area_struct *vma) -{ - XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma)); - - if (XGI_VMA_PRIVATE(vma)) - { - xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); - XGI_ATOMIC_DEC(block->use_count); - - /* - * if use_count is down to 0, the kernel virtual mapping was freed - * but the underlying physical pages were not, we need to clear the - * bit and free the physical pages. - */ - if (XGI_ATOMIC_READ(block->use_count) == 0) - { - // Need TO Finish - XGI_VMA_PRIVATE(vma) = NULL; - } - } -} - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int *type) -{ - xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); - struct page *page = NOPAGE_SIGBUS; - unsigned long offset = 0; - unsigned long page_addr = 0; -/* - XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma), - address); -*/ - offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); - - offset = offset - block->bus_addr; - - offset >>= PAGE_SHIFT; - - page_addr = block->page_table[offset].virt_addr; - - if (xgi_temp) - { - XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" - "block->page_count: 0x%lx block->page_order: 0x%lx" - "block->page_table[0x%lx].virt_addr: 0x%lx\n", - block->bus_addr, block->hw_addr, - block->page_count, block->page_order, - offset, - 
block->page_table[offset].virt_addr); - xgi_temp = 0; - } - - if (!page_addr) goto out; /* hole or end-of-file */ - page = virt_to_page(page_addr); - - /* got it, now increment the count */ - get_page(page); -out: - return page; - -} -#else -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int write_access) -{ - xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma); - struct page *page = NOPAGE_SIGBUS; - unsigned long offset = 0; - unsigned long page_addr = 0; -/* - XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma), - address); -*/ - offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); - - offset = offset - block->bus_addr; - - offset >>= PAGE_SHIFT; - - page_addr = block->page_table[offset].virt_addr; - - if (xgi_temp) - { - XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" - "block->page_count: 0x%lx block->page_order: 0x%lx" - "block->page_table[0x%lx].virt_addr: 0x%lx\n", - block->bus_addr, block->hw_addr, - block->page_count, block->page_order, - offset, - block->page_table[offset].virt_addr); - xgi_temp = 0; - } - - if (!page_addr) goto out; /* hole or end-of-file */ - page = virt_to_page(page_addr); - - /* got it, now increment the count */ - get_page(page); -out: - return page; -} -#endif - -#if 0 -static struct file_operations xgi_fops = { - /* owner: THIS_MODULE, */ - poll: xgi_kern_poll, - ioctl: xgi_kern_ioctl, - mmap: xgi_kern_mmap, - open: xgi_kern_open, - release: xgi_kern_release, -}; -#endif - -static struct file_operations xgi_fops = { - .owner = THIS_MODULE, - .poll = xgi_kern_poll, - .ioctl = xgi_kern_ioctl, - .mmap = xgi_kern_mmap, - .open = xgi_kern_open, - .release = xgi_kern_release, -}; - -static xgi_file_private_t * xgi_alloc_file_private(void) -{ - xgi_file_private_t *fp; - - XGI_KMALLOC(fp, sizeof(xgi_file_private_t)); - if (!fp) - return NULL; - - memset(fp, 0, sizeof(xgi_file_private_t)); - - /* initialize 
this file's event queue */ - init_waitqueue_head(&fp->wait_queue); - - xgi_init_lock(fp->fp_lock); - - return fp; -} - -static void xgi_free_file_private(xgi_file_private_t *fp) -{ - if (fp == NULL) - return; - - XGI_KFREE(fp, sizeof(xgi_file_private_t)); -} - -int xgi_kern_open(struct inode *inode, struct file *filp) -{ - xgi_info_t *info = NULL; - int dev_num; - int result = 0, status; - - /* - * the type and num values are only valid if we are not using devfs. - * However, since we use them to retrieve the device pointer, we - * don't need them with devfs as filp->private_data is already - * initialized - */ - filp->private_data = xgi_alloc_file_private(); - if (filp->private_data == NULL) - return -ENOMEM; - - XGI_INFO("filp->private_data %p\n", filp->private_data); - /* - * for control device, just jump to its open routine - * after setting up the private data - */ - if (XGI_IS_CONTROL_DEVICE(inode)) - return xgi_kern_ctl_open(inode, filp); - - /* what device are we talking about? */ - dev_num = XGI_DEVICE_NUMBER(inode); - if (dev_num >= XGI_MAX_DEVICES) - { - xgi_free_file_private(filp->private_data); - filp->private_data = NULL; - return -ENODEV; - } - - info = &xgi_devices[dev_num]; - - XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num); - - xgi_down(info->info_sem); - XGI_CHECK_PCI_CONFIG(info); - - XGI_INFO_FROM_FP(filp) = info; - - /* - * map the memory and allocate isr on first open - */ - - if (!(info->flags & XGI_FLAG_OPEN)) - { - XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); - - if (info->device_id == 0) - { - XGI_INFO("open of nonexistent device %d\n", dev_num); - result = -ENXIO; - goto failed; - } - - /* initialize struct irqaction */ - status = request_irq(info->irq, xgi_kern_isr, - SA_INTERRUPT | SA_SHIRQ, "xgi", - (void *) info); - if (status != 0) - { - if (info->irq && (status == -EBUSY)) - { - XGI_ERROR("Tried to get irq %d, but another driver", - (unsigned int) info->irq); - XGI_ERROR("has it and is not sharing it.\n"); - } - 
XGI_ERROR("isr request failed 0x%x\n", status); - result = -EIO; - goto failed; - } - - /* - * #define DECLARE_TASKLET(name, func, data) \ - * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } - */ - info->tasklet.func = xgi_kern_isr_bh; - info->tasklet.data = (unsigned long) info; - tasklet_enable(&info->tasklet); - - /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - xgi_cmdlist_initialize(info, 0x100000); - - info->flags |= XGI_FLAG_OPEN; - } - - XGI_ATOMIC_INC(info->use_count); - -failed: - xgi_up(info->info_sem); - - if ((result) && filp->private_data) - { - xgi_free_file_private(filp->private_data); - filp->private_data = NULL; - } - - return result; -} - -int xgi_kern_release(struct inode *inode, struct file *filp) -{ - xgi_info_t *info = XGI_INFO_FROM_FP(filp); - - XGI_CHECK_PCI_CONFIG(info); - - /* - * for control device, just jump to its open routine - * after setting up the private data - */ - if (XGI_IS_CONTROL_DEVICE(inode)) - return xgi_kern_ctl_close(inode, filp); - - XGI_INFO("Jong-xgi_kern_release on device %d\n", XGI_DEVICE_NUMBER(inode)); - - xgi_down(info->info_sem); - if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) - { - - /* - * The usage count for this device has dropped to zero, it can be shut - * down safely; disable its interrupts. - */ - - /* - * Disable this device's tasklet to make sure that no bottom half will - * run with undefined device state. - */ - tasklet_disable(&info->tasklet); - - /* - * Free the IRQ, which may block until all pending interrupt processing - * has completed. 
- */ - free_irq(info->irq, (void *)info); - - xgi_cmdlist_cleanup(info); - - /* leave INIT flag alone so we don't reinit every time */ - info->flags &= ~XGI_FLAG_OPEN; - } - - xgi_up(info->info_sem); - - if (FILE_PRIVATE(filp)) - { - xgi_free_file_private(FILE_PRIVATE(filp)); - FILE_PRIVATE(filp) = NULL; - } - - return 0; -} - -int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) -{ - //struct inode *inode = INODE_FROM_FP(filp); - xgi_info_t *info = XGI_INFO_FROM_FP(filp); - xgi_pcie_block_t *block; - int pages = 0; - unsigned long prot; - - XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma)); - - XGI_CHECK_PCI_CONFIG(info); - - if (XGI_MASK_OFFSET(vma->vm_start) - || XGI_MASK_OFFSET(vma->vm_end)) - { - XGI_ERROR("VM: bad mmap range: %lx - %lx\n", - vma->vm_start, vma->vm_end); - return -ENXIO; - } - - pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - - vma->vm_ops = &xgi_vm_ops; - - /* XGI IO(reg) space */ - if (IS_IO_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) - { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - /* mark it as IO so that we don't dump it on core dump */ - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap io space \n"); - } - /* XGI fb space */ - /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */ - else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) - { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - // mark it as IO so that we don't dump it on core dump - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap fb space \n"); - } - /* PCIE allocator */ - /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */ - else if 
(IS_PCIE_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) - { - xgi_down(info->pcie_sem); - - block = (xgi_pcie_block_t *)xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma)); - - if (block == NULL) - { - XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); - xgi_up(info->pcie_sem); - return -EAGAIN; - } - - if (block->page_count != pages) - { - XGI_ERROR("pre-allocated PCIE memory has wrong number of pages!\n"); - xgi_up(info->pcie_sem); - return -EAGAIN; - } - - vma->vm_private_data = block; - XGI_ATOMIC_INC(block->use_count); - xgi_up(info->pcie_sem); - - /* - * prevent the swapper from swapping it out - * mark the memory i/o so the buffers aren't - * dumped on core dumps */ - vma->vm_flags |= (VM_LOCKED | VM_IO); - - /* un-cached */ - prot = pgprot_val(vma->vm_page_prot); - /* - if (boot_cpu_data.x86 > 3) - prot |= _PAGE_PCD | _PAGE_PWT; - */ - vma->vm_page_prot = __pgprot(prot); - - XGI_INFO("VM: mmap pcie space \n"); - } -#if 0 - else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) - { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - // mark it as IO so that we don't dump it on core dump - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap fb space \n"); - } -#endif - else - { - vma->vm_flags |= (VM_IO | VM_LOCKED); - XGI_ERROR("VM: mmap wrong range \n"); - } - - vma->vm_file = filp; - - return 0; -} - -unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) -{ - xgi_file_private_t *fp; - xgi_info_t *info; - unsigned int mask = 0; - unsigned long eflags; - - info = XGI_INFO_FROM_FP(filp); - - if (info->device_number == XGI_CONTROL_DEVICE_NUMBER) - return xgi_kern_ctl_poll(filp, wait); - - fp = XGI_GET_FP(filp); - - if (!(filp->f_flags & O_NONBLOCK)) - { - /* add us to the list */ - poll_wait(filp, &fp->wait_queue, wait); - } - - xgi_lock_irqsave(fp->fp_lock, 
eflags); - - /* wake the user on any event */ - if (fp->num_events) - { - XGI_INFO("Hey, an event occured!\n"); - /* - * trigger the client, when they grab the event, - * we'll decrement the event count - */ - mask |= (POLLPRI|POLLIN); - } - xgi_unlock_irqsave(fp->fp_lock, eflags); - - return mask; -} - -int xgi_kern_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - xgi_info_t *info; - xgi_mem_alloc_t *alloc = NULL; - - int status = 0; - void *arg_copy; - int arg_size; - int err = 0; - - info = XGI_INFO_FROM_FP(filp); - - XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), _IOC_NR(cmd), arg, _IOC_SIZE(cmd)); - /* - * extract the type and number bitfields, and don't decode - * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() - */ - if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) return -ENOTTY; - if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) return -ENOTTY; - - /* - * the direction is a bitmask, and VERIFY_WRITE catches R/W - * transfers. `Type' is user-oriented, while - * access_ok is kernel-oriented, so the concept of "read" and - * "write" is reversed - */ - if (_IOC_DIR(cmd) & _IOC_READ) - { - err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); - } - else if (_IOC_DIR(cmd) & _IOC_WRITE) - { - err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); - } - if (err) return -EFAULT; - - XGI_CHECK_PCI_CONFIG(info); - - arg_size = _IOC_SIZE(cmd); - XGI_KMALLOC(arg_copy, arg_size); - if (arg_copy == NULL) - { - XGI_ERROR("failed to allocate ioctl memory\n"); - return -ENOMEM; - } - - /* Jong 05/25/2006 */ - /* copy_from_user(arg_copy, (void *)arg, arg_size); */ - if(copy_from_user(arg_copy, (void *)arg, arg_size)) - { - XGI_ERROR("failed to copyin ioctl data\n"); - XGI_INFO("Jong-copy_from_user-fail! \n"); - } - else - XGI_INFO("Jong-copy_from_user-OK! 
\n"); - - alloc = (xgi_mem_alloc_t *)arg_copy; - XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, arg_size); - - switch (_IOC_NR(cmd)) - { - case XGI_ESC_DEVICE_INFO: - XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); - xgi_get_device_info(info, (struct xgi_chip_info_s *) arg_copy); - break; - case XGI_ESC_POST_VBIOS: - XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); - break; - case XGI_ESC_FB_ALLOC: - XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); - xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc); - break; - case XGI_ESC_FB_FREE: - XGI_INFO("Jong-xgi_ioctl_fb_free \n"); - xgi_fb_free(info, *(unsigned long *) arg_copy); - break; - case XGI_ESC_MEM_COLLECT: - XGI_INFO("Jong-xgi_ioctl_mem_collect \n"); - xgi_mem_collect(info, (unsigned int *) arg_copy); - break; - case XGI_ESC_PCIE_ALLOC: - XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); - xgi_pcie_alloc(info, ((xgi_mem_req_t *)arg_copy)->size, - ((xgi_mem_req_t *)arg_copy)->owner, alloc); - break; - case XGI_ESC_PCIE_FREE: - XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", *((unsigned long *) arg_copy)); - xgi_pcie_free(info, *((unsigned long *) arg_copy)); - break; - case XGI_ESC_PCIE_CHECK: - XGI_INFO("Jong-xgi_pcie_heap_check \n"); - xgi_pcie_heap_check(); - break; - case XGI_ESC_GET_SCREEN_INFO: - XGI_INFO("Jong-xgi_get_screen_info \n"); - xgi_get_screen_info(info, (struct xgi_screen_info_s *) arg_copy); - break; - case XGI_ESC_PUT_SCREEN_INFO: - XGI_INFO("Jong-xgi_put_screen_info \n"); - xgi_put_screen_info(info, (struct xgi_screen_info_s *) arg_copy); - break; - case XGI_ESC_MMIO_INFO: - XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); - xgi_get_mmio_info(info, (struct xgi_mmio_info_s *) arg_copy); - break; - case XGI_ESC_GE_RESET: - XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); - xgi_ge_reset(info); - break; - case XGI_ESC_SAREA_INFO: - XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); - xgi_sarea_info(info, (struct xgi_sarea_info_s *) arg_copy); - break; - case XGI_ESC_DUMP_REGISTER: - 
XGI_INFO("Jong-xgi_ioctl_dump_register \n"); - xgi_dump_register(info); - break; - case XGI_ESC_DEBUG_INFO: - XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); - xgi_restore_registers(info); - //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); - //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); - break; - case XGI_ESC_SUBMIT_CMDLIST: - XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); - xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy); - break; - case XGI_ESC_TEST_RWINKERNEL: - XGI_INFO("Jong-xgi_test_rwinkernel \n"); - xgi_test_rwinkernel(info, *(unsigned long*) arg_copy); - break; - case XGI_ESC_STATE_CHANGE: - XGI_INFO("Jong-xgi_state_change \n"); - xgi_state_change(info, (xgi_state_info_t *) arg_copy); - break; - case XGI_ESC_CPUID: - XGI_INFO("Jong-XGI_ESC_CPUID \n"); - xgi_get_cpu_id((struct cpu_info_s*) arg_copy); - break; - default: - XGI_INFO("Jong-xgi_ioctl_default \n"); - status = -EINVAL; - break; - } - - if (copy_to_user((void *)arg, arg_copy, arg_size)) - { - XGI_ERROR("failed to copyout ioctl data\n"); - XGI_INFO("Jong-copy_to_user-fail! \n"); - } - else - XGI_INFO("Jong-copy_to_user-OK! 
\n"); - - XGI_KFREE(arg_copy, arg_size); - return status; -} - - -/* - * xgi control driver operations defined here - */ -int xgi_kern_ctl_open(struct inode *inode, struct file *filp) -{ - xgi_info_t *info = &xgi_ctl_device; - - int rc = 0; - - XGI_INFO("Jong-xgi_kern_ctl_open\n"); - - xgi_down(info->info_sem); - info->device_number = XGI_CONTROL_DEVICE_NUMBER; - - /* save the xgi info in file->private_data */ - filp->private_data = info; - - if (XGI_ATOMIC_READ(info->use_count) == 0) - { - init_waitqueue_head(&xgi_ctl_waitqueue); - } - - info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL; - - XGI_ATOMIC_INC(info->use_count); - xgi_up(info->info_sem); - - return rc; -} - -int xgi_kern_ctl_close(struct inode *inode, struct file *filp) -{ - xgi_info_t *info = XGI_INFO_FROM_FP(filp); - - XGI_INFO("Jong-xgi_kern_ctl_close\n"); - - xgi_down(info->info_sem); - if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) - { - info->flags = 0; - } - xgi_up(info->info_sem); - - if (FILE_PRIVATE(filp)) - { - xgi_free_file_private(FILE_PRIVATE(filp)); - FILE_PRIVATE(filp) = NULL; - } - - return 0; -} - -unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table *wait) -{ - //xgi_info_t *info = XGI_INFO_FROM_FP(filp);; - unsigned int ret = 0; - - if (!(filp->f_flags & O_NONBLOCK)) - { - poll_wait(filp, &xgi_ctl_waitqueue, wait); - } - - return ret; -} - -/* - * xgi proc system - */ -static u8 xgi_find_pcie_capability(struct pci_dev *dev) -{ - u16 status; - u8 cap_ptr, cap_id; - - pci_read_config_word(dev, PCI_STATUS, &status); - status &= PCI_STATUS_CAP_LIST; - if (!status) - return 0; - - switch (dev->hdr_type) - { - case PCI_HEADER_TYPE_NORMAL: - case PCI_HEADER_TYPE_BRIDGE: - pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr); - break; - default: - return 0; - } - - do - { - cap_ptr &= 0xFC; - pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); - pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr); - } while (cap_ptr && cap_id != 0xFF); - - return 0; -} - 
-static struct pci_dev* xgi_get_pci_device(xgi_info_t *info) -{ - struct pci_dev *dev; - - dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL); - while (dev) - { - if (XGI_PCI_SLOT_NUMBER(dev) == info->slot - && XGI_PCI_BUS_NUMBER(dev) == info->bus) - return dev; - dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev); - } - - return NULL; -} - -int xgi_kern_read_card_info(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - struct pci_dev *dev; - char *type; - int len = 0; - - xgi_info_t *info; - info = (xgi_info_t *) data; - - dev = xgi_get_pci_device(info); - if (!dev) - return 0; - - type = xgi_find_pcie_capability(dev) ? "PCIE" : "PCI"; - len += sprintf(page+len, "Card Type: \t %s\n", type); - - XGI_PCI_DEV_PUT(dev); - return len; -} - -int xgi_kern_read_version(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - int len = 0; - - len += sprintf(page+len, "XGI version: %s\n", "1.0"); - len += sprintf(page+len, "GCC version: %s\n", "3.0"); - - return len; -} - -int xgi_kern_read_pcie_info(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - return 0; -} - -int xgi_kern_read_status(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - return 0; -} - - -static void xgi_proc_create(void) -{ -#ifdef CONFIG_PROC_FS - - struct pci_dev *dev; - int i = 0; - char name[6]; - - struct proc_dir_entry *entry; - struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; - - xgi_info_t *info; - xgi_info_t *xgi_max_devices; - - /* world readable directory */ - int flags = S_IFDIR | S_IRUGO | S_IXUGO; - - proc_xgi = create_proc_entry("xgi", flags, proc_root_driver); - if (!proc_xgi) - goto failed; - - proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi); - if (!proc_xgi_cards) - goto failed; - - proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi); - if (!proc_xgi_pcie) - goto failed; - - /* - * Set the module owner to ensure that the reference - * 
count reflects accesses to the proc files. - */ - proc_xgi->owner = THIS_MODULE; - proc_xgi_cards->owner = THIS_MODULE; - proc_xgi_pcie->owner = THIS_MODULE; - - xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; - for (info = xgi_devices; info < xgi_max_devices; info++) - { - if (info->device_id == 0) - break; - - /* world readable file */ - flags = S_IFREG | S_IRUGO; - - dev = xgi_get_pci_device(info); - if (!dev) - break; - - sprintf(name, "%d", i++); - entry = create_proc_entry(name, flags, proc_xgi_cards); - if (!entry) - { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_card_info; - entry->owner = THIS_MODULE; - - if (xgi_find_pcie_capability(dev)) - { - entry = create_proc_entry("status", flags, proc_xgi_pcie); - if (!entry) - { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_status; - entry->owner = THIS_MODULE; - - entry = create_proc_entry("card", flags, proc_xgi_pcie); - if (!entry) - { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_pcie_info; - entry->owner = THIS_MODULE; - } - - XGI_PCI_DEV_PUT(dev); - } - - entry = create_proc_entry("version", flags, proc_xgi); - if (!entry) - goto failed; - - entry->read_proc = xgi_kern_read_version; - entry->owner = THIS_MODULE; - - entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie); - if (!entry) - goto failed; - - entry->data = NULL; - entry->read_proc = xgi_kern_read_pcie_info; - entry->owner = THIS_MODULE; - - return; - -failed: - XGI_ERROR("failed to create /proc entries!\n"); - xgi_proc_remove_all(proc_xgi); -#endif -} - -#ifdef CONFIG_PROC_FS -static void xgi_proc_remove_all(struct proc_dir_entry *entry) -{ - while (entry) - { - struct proc_dir_entry *next = entry->next; - if (entry->subdir) - xgi_proc_remove_all(entry->subdir); - remove_proc_entry(entry->name, entry->parent); - if (entry == proc_xgi) - break; - entry = next; - } -} 
-#endif - -static void xgi_proc_remove(void) -{ -#ifdef CONFIG_PROC_FS - xgi_proc_remove_all(proc_xgi); -#endif -} - -/* - * driver receives an interrupt if someone waiting, then hand it off. - */ -irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) -{ - xgi_info_t *info = (xgi_info_t *) dev_id; - u32 need_to_run_bottom_half = 0; - - //XGI_INFO("xgi_kern_isr \n"); - - //XGI_CHECK_PCI_CONFIG(info); - - //xgi_dvi_irq_handler(info); - - if (need_to_run_bottom_half) - { - tasklet_schedule(&info->tasklet); - } - - return IRQ_HANDLED; -} - -void xgi_kern_isr_bh(unsigned long data) -{ - xgi_info_t *info = (xgi_info_t *) data; - - XGI_INFO("xgi_kern_isr_bh \n"); - - //xgi_dvi_irq_handler(info); - - XGI_CHECK_PCI_CONFIG(info); -} - -static void xgi_lock_init(xgi_info_t *info) -{ - if (info == NULL) return; - - spin_lock_init(&info->info_lock); - - sema_init(&info->info_sem, 1); - sema_init(&info->fb_sem, 1); - sema_init(&info->pcie_sem, 1); - - XGI_ATOMIC_SET(info->use_count, 0); -} - -static void xgi_dev_init(xgi_info_t *info) -{ - struct pci_dev *pdev = NULL; - struct xgi_dev *dev; - int found = 0; - u16 pci_cmd; - - XGI_INFO("Enter xgi_dev_init \n"); - - //XGI_PCI_FOR_EACH_DEV(pdev) - { - for (dev = xgidev_list; dev->vendor; dev++) - { - if ((dev->vendor == pdev->vendor) && (dev->device == pdev->device)) - { - XGI_INFO("dev->vendor = pdev->vendor= %x \n", dev->vendor); - XGI_INFO("dev->device = pdev->device= %x \n", dev->device); - - xgi_devices[found].device_id = pdev->device; - - pci_read_config_byte(pdev, PCI_REVISION_ID, &xgi_devices[found].revision_id); - - XGI_INFO("PCI_REVISION_ID= %x \n", xgi_devices[found].revision_id); - - pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); - - XGI_INFO("PCI_COMMAND = %x \n", pci_cmd); - - break; - } - } - } -} -/* - * Export to Linux Kernel - */ - -static int __init xgi_init_module(void) -{ - xgi_info_t *info = &xgi_devices[xgi_num_devices]; - int i, result; - - XGI_INFO("Jong-xgi kernel driver %s 
initializing\n", XGI_DRV_VERSION); - //SET_MODULE_OWNER(&xgi_fops); - - memset(xgi_devices, 0, sizeof(xgi_devices)); - - if (pci_register_driver(&xgi_pci_driver) < 0) - { - pci_unregister_driver(&xgi_pci_driver); - XGI_ERROR("no XGI graphics adapter found\n"); - return -ENODEV; - } - - XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.base); - XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.size); - -/* Jong 07/27/2006; test for ubuntu */ -/* -#ifdef CONFIG_DEVFS_FS - - XGI_INFO("Jong-Use devfs \n"); - do - { - xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0); - if (xgi_devfs_handles[0] == NULL) - { - result = -ENOMEM; - XGI_ERROR("devfs register failed\n"); - goto failed; - } - } while(0); -#else */ /* no devfs, do it the "classic" way */ - - - XGI_INFO("Jong-Use non-devfs \n"); - /* - * Register your major, and accept a dynamic number. This is the - * first thing to do, in order to avoid releasing other module's - * fops in scull_cleanup_module() - */ - result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops); - if (result < 0) - { - XGI_ERROR("register chrdev failed\n"); - pci_unregister_driver(&xgi_pci_driver); - return result; - } - if (xgi_major == 0) xgi_major = result; /* dynamic */ - -/* #endif */ /* CONFIG_DEVFS_FS */ - - XGI_INFO("Jong-major number %d\n", xgi_major); - - /* instantiate tasklets */ - for (i = 0; i < XGI_MAX_DEVICES; i++) - { - /* - * We keep one tasklet per card to avoid latency issues with more - * than one device; no two instances of a single tasklet are ever - * executed concurrently. 
- */ - XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1); - } - - /* init the xgi control device */ - { - xgi_info_t *info_ctl = &xgi_ctl_device; - xgi_lock_init(info_ctl); - } - - /* Init the resource manager */ - INIT_LIST_HEAD(&xgi_mempid_list); - if (!xgi_fb_heap_init(info)) - { - XGI_ERROR("xgi_fb_heap_init() failed\n"); - result = -EIO; - goto failed; - } - - /* Init the resource manager */ - if (!xgi_pcie_heap_init(info)) - { - XGI_ERROR("xgi_pcie_heap_init() failed\n"); - result = -EIO; - goto failed; - } - - /* create /proc/driver/xgi */ - xgi_proc_create(); - -#if defined(DEBUG) - inter_module_register("xgi_devices", THIS_MODULE, xgi_devices); -#endif - - return 0; - -failed: -#ifdef CONFIG_DEVFS_FS - XGI_DEVFS_REMOVE_CONTROL(); - XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); -#endif - - if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) - XGI_ERROR("unregister xgi chrdev failed\n"); - - for (i = 0; i < xgi_num_devices; i++) - { - if (xgi_devices[i].dev) - { - release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); - release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size); - } - } - - pci_unregister_driver(&xgi_pci_driver); - return result; - - return 1; -} - -void __exit xgi_exit_module(void) -{ - int i; - xgi_info_t *info, *max_devices; - -#ifdef CONFIG_DEVFS_FS - /* - XGI_DEVFS_REMOVE_CONTROL(); - for (i = 0; i < XGI_MAX_DEVICES; i++) - XGI_DEVFS_REMOVE_DEVICE(i); - */ - XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); -#endif - - if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) - XGI_ERROR("unregister xgi chrdev failed\n"); - - XGI_INFO("Jong-unregister xgi chrdev scceeded\n"); - for (i = 0; i < XGI_MAX_DEVICES; i++) - { - if (xgi_devices[i].dev) - { - /* clean up the flush2D batch array */ - xgi_cmdlist_cleanup(&xgi_devices[i]); - - if(xgi_devices[i].fb.vbase != NULL) - { - iounmap((void *)xgi_devices[i].fb.vbase); - xgi_devices[i].fb.vbase = NULL; - } - if(xgi_devices[i].mmio.vbase != NULL) - { - iounmap((void 
*)xgi_devices[i].mmio.vbase); - xgi_devices[i].mmio.vbase = NULL; - } - - //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); - //XGI_INFO("release frame buffer mem region scceeded\n"); - - release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size); - XGI_INFO("release MMIO mem region scceeded\n"); - - xgi_fb_heap_cleanup(&xgi_devices[i]); - XGI_INFO("xgi_fb_heap_cleanup scceeded\n"); - - xgi_pcie_heap_cleanup(&xgi_devices[i]); - XGI_INFO("xgi_pcie_heap_cleanup scceeded\n"); - - XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev); - } - } - - pci_unregister_driver(&xgi_pci_driver); - - /* remove /proc/driver/xgi */ - xgi_proc_remove(); - -#if defined(DEBUG) - inter_module_unregister("xgi_devices"); -#endif -} - -module_init(xgi_init_module); -module_exit(xgi_exit_module); - -#if defined(XGI_PM_SUPPORT_ACPI) -int xgi_acpi_event(struct pci_dev *dev, u32 state) -{ - return 1; -} - -int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state) -{ - return 1; -} - -int xgi_kern_acpi_resume(struct pci_dev *dev) -{ - return 1; -} -#endif - -MODULE_AUTHOR("Andrea Zhang "); -MODULE_DESCRIPTION("xgi kernel driver for xgi cards"); -MODULE_LICENSE("GPL"); + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +/* for debug */ +static int xgi_temp = 1; +/* + * global parameters + */ +static struct xgi_dev { + u16 vendor; + u16 device; + const char *name; +} xgidev_list[] = { + { + PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, { + PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, { + 0, 0, NULL} +}; + +int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */ + +static int xgi_num_devices = 0; + +xgi_info_t xgi_devices[XGI_MAX_DEVICES]; + +#if defined(XGI_PM_SUPPORT_APM) +static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; +#endif + +/* add one for the control device */ +xgi_info_t xgi_ctl_device; +wait_queue_head_t xgi_ctl_waitqueue; + +#ifdef CONFIG_PROC_FS +struct proc_dir_entry *proc_xgi; +#endif + +#ifdef CONFIG_DEVFS_FS +devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; +#endif + +struct list_head xgi_mempid_list; + +/* xgi_ functions.. 
do not take a state device parameter */ +static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info); +static void xgi_proc_create(void); +static void xgi_proc_remove_all(struct proc_dir_entry *); +static void xgi_proc_remove(void); + +/* xgi_kern_ functions, interfaces used by linux kernel */ +int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *); + +unsigned int xgi_kern_poll(struct file *, poll_table *); +int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +int xgi_kern_mmap(struct file *, struct vm_area_struct *); +int xgi_kern_open(struct inode *, struct file *); +int xgi_kern_release(struct inode *inode, struct file *filp); + +void xgi_kern_vma_open(struct vm_area_struct *vma); +void xgi_kern_vma_release(struct vm_area_struct *vma); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type); +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int write_access); +#endif + +int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *); + +int xgi_kern_ctl_open(struct inode *, struct file *); +int xgi_kern_ctl_close(struct inode *, struct file *); +unsigned int xgi_kern_ctl_poll(struct file *, poll_table *); + +void xgi_kern_isr_bh(unsigned long); +irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); + +static void xgi_lock_init(xgi_info_t * info); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_kern_acpi_standby(struct pci_dev *, u32); +int xgi_kern_acpi_resume(struct pci_dev *); +#endif + +/* + * verify access to pci config space wasn't disabled behind our back + * unfortunately, XFree86 enables/disables memory access in pci config space at + * various 
times (such as restoring initial pci config space settings during vt + * switches or when doing mulicard). As a result, all of our register accesses + * are garbage at this point. add a check to see if access was disabled and + * reenable any such access. + */ +#define XGI_CHECK_PCI_CONFIG(xgi) \ + xgi_check_pci_config(xgi, __LINE__) + +static inline void xgi_check_pci_config(xgi_info_t * info, int line) +{ + unsigned short cmd, flag = 0; + + // don't do this on the control device, only the actual devices + if (info->flags & XGI_FLAG_CONTROL) + return; + + pci_read_config_word(info->dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + XGI_INFO("restoring bus mastering! (%d)\n", line); + cmd |= PCI_COMMAND_MASTER; + flag = 1; + } + + if (!(cmd & PCI_COMMAND_MEMORY)) { + XGI_INFO("restoring MEM access! (%d)\n", line); + cmd |= PCI_COMMAND_MEMORY; + flag = 1; + } + + if (flag) + pci_write_config_word(info->dev, PCI_COMMAND, cmd); +} + +static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info) +{ + return 1; +} + +/* + * struct pci_device_id { + * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID + * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID + * unsigned int class, class_mask; // (class,subclass,prog-if) triplet + * unsigned long driver_data; // Data private to the driver + * }; + */ + +static struct pci_device_id xgi_dev_table[] = { + { + .vendor = PCI_VENDOR_ID_XGI, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_DISPLAY_VGA << 8), + .class_mask = ~0, + }, + {} +}; + +/* + * #define MODULE_DEVICE_TABLE(type,name) \ + * MODULE_GENERIC_TABLE(type##_device,name) + */ +MODULE_DEVICE_TABLE(pci, xgi_dev_table); + +/* + * struct pci_driver { + * struct list_head node; + * char *name; + * const struct pci_device_id *id_table; // NULL if wants all devices + * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted + * void (*remove)(struct 
pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver) + * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context + * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended + * int (*resume)(struct pci_dev *dev); // Device woken up + * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event + * }; + */ +static struct pci_driver xgi_pci_driver = { + .name = "xgi", + .id_table = xgi_dev_table, + .probe = xgi_kern_probe, +#if defined(XGI_SUPPORT_ACPI) + .suspend = xgi_kern_acpi_standby, + .resume = xgi_kern_acpi_resume, +#endif +}; + +/* + * find xgi devices and set initial state + */ +int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) +{ + xgi_info_t *info; + + if ((dev->vendor != PCI_VENDOR_ID_XGI) + || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) { + return -1; + } + + if (xgi_num_devices == XGI_MAX_DEVICES) { + XGI_INFO("maximum device number (%d) reached!\n", + xgi_num_devices); + return -1; + } + + /* enable io, mem, and bus-mastering in pci config space */ + if (pci_enable_device(dev) != 0) { + XGI_INFO("pci_enable_device failed, aborting\n"); + return -1; + } + + XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices); + + pci_set_master(dev); + + info = &xgi_devices[xgi_num_devices]; + info->dev = dev; + info->vendor_id = dev->vendor; + info->device_id = dev->device; + info->bus = dev->bus->number; + info->slot = PCI_SLOT((dev)->devfn); + + xgi_lock_init(info); + + info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1); + info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1); + + /* check IO region */ + if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) { + XGI_ERROR("cannot reserve MMIO memory\n"); + goto error_disable_dev; + } + + XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); + XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); + + info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base, + info->mmio.size); + if 
(!info->mmio.vbase) { + release_mem_region(info->mmio.base, info->mmio.size); + XGI_ERROR("info->mmio.vbase failed\n"); + goto error_disable_dev; + } + xgi_enable_mmio(info); + + //xgi_enable_ge(info); + + XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase); + + info->fb.base = XGI_PCI_RESOURCE_START(dev, 0); + info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0); + + XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base); + XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + + info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024; + XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + + /* check frame buffer region + if (!request_mem_region(info->fb.base, info->fb.size, "xgi")) + { + release_mem_region(info->mmio.base, info->mmio.size); + XGI_ERROR("cannot reserve frame buffer memory\n"); + goto error_disable_dev; + } + + info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base, + info->fb.size); + + if (!info->fb.vbase) + { + release_mem_region(info->mmio.base, info->mmio.size); + release_mem_region(info->fb.base, info->fb.size); + XGI_ERROR("info->fb.vbase failed\n"); + goto error_disable_dev; + } + */ + info->fb.vbase = NULL; + XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); + + info->irq = dev->irq; + + /* check common error condition */ + if (info->irq == 0) { + XGI_ERROR("Can't find an IRQ for your XGI card! 
\n"); + goto error_zero_dev; + } + XGI_INFO("info->irq: %lx \n", info->irq); + + //xgi_enable_dvi_interrupt(info); + + /* sanity check the IO apertures */ + if ((info->mmio.base == 0) || (info->mmio.size == 0) + || (info->fb.base == 0) || (info->fb.size == 0)) { + XGI_ERROR("The IO regions for your XGI card are invalid.\n"); + + if ((info->mmio.base == 0) || (info->mmio.size == 0)) { + XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n", + info->mmio.base, info->mmio.size); + } + + if ((info->fb.base == 0) || (info->fb.size == 0)) { + XGI_ERROR + ("frame buffer appears to be wrong: 0x%lx 0x%lx\n", + info->fb.base, info->fb.size); + } + + goto error_zero_dev; + } + //xgi_num_devices++; + + return 0; + + error_zero_dev: + release_mem_region(info->fb.base, info->fb.size); + release_mem_region(info->mmio.base, info->mmio.size); + + error_disable_dev: + pci_disable_device(dev); + return -1; + +} + +/* + * vma operations... + * this is only called when the vmas are duplicated. this + * appears to only happen when the process is cloned to create + * a new process, and not when the process is threaded. + * + * increment the usage count for the physical pages, so when + * this clone unmaps the mappings, the pages are not + * deallocated under the original process. 
+ */ +struct vm_operations_struct xgi_vm_ops = { + .open = xgi_kern_vma_open, + .close = xgi_kern_vma_release, + .nopage = xgi_kern_vma_nopage, +}; + +void xgi_kern_vma_open(struct vm_area_struct *vma) +{ + XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n", + vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); + + if (XGI_VMA_PRIVATE(vma)) { + xgi_pcie_block_t *block = + (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + XGI_ATOMIC_INC(block->use_count); + } +} + +void xgi_kern_vma_release(struct vm_area_struct *vma) +{ + XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n", + vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); + + if (XGI_VMA_PRIVATE(vma)) { + xgi_pcie_block_t *block = + (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + XGI_ATOMIC_DEC(block->use_count); + + /* + * if use_count is down to 0, the kernel virtual mapping was freed + * but the underlying physical pages were not, we need to clear the + * bit and free the physical pages. + */ + if (XGI_ATOMIC_READ(block->use_count) == 0) { + // Need TO Finish + XGI_VMA_PRIVATE(vma) = NULL; + } + } +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct page *page = NOPAGE_SIGBUS; + unsigned long offset = 0; + unsigned long page_addr = 0; +/* + XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma), + address); +*/ + offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + + offset = offset - block->bus_addr; + + offset >>= PAGE_SHIFT; + + page_addr = block->page_table[offset].virt_addr; + + if (xgi_temp) { + XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" + "block->page_count: 0x%lx block->page_order: 0x%lx" + "block->page_table[0x%lx].virt_addr: 0x%lx\n", + block->bus_addr, block->hw_addr, + block->page_count, block->page_order, + offset, 
block->page_table[offset].virt_addr); + xgi_temp = 0; + } + + if (!page_addr) + goto out; /* hole or end-of-file */ + page = virt_to_page(page_addr); + + /* got it, now increment the count */ + get_page(page); + out: + return page; + +} +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, + unsigned long address, int write_access) +{ + xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct page *page = NOPAGE_SIGBUS; + unsigned long offset = 0; + unsigned long page_addr = 0; +/* + XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", + vma->vm_start, + vma->vm_end, + XGI_VMA_OFFSET(vma), + address); +*/ + offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + + offset = offset - block->bus_addr; + + offset >>= PAGE_SHIFT; + + page_addr = block->page_table[offset].virt_addr; + + if (xgi_temp) { + XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" + "block->page_count: 0x%lx block->page_order: 0x%lx" + "block->page_table[0x%lx].virt_addr: 0x%lx\n", + block->bus_addr, block->hw_addr, + block->page_count, block->page_order, + offset, block->page_table[offset].virt_addr); + xgi_temp = 0; + } + + if (!page_addr) + goto out; /* hole or end-of-file */ + page = virt_to_page(page_addr); + + /* got it, now increment the count */ + get_page(page); + out: + return page; +} +#endif + +#if 0 +static struct file_operations xgi_fops = { + /* owner: THIS_MODULE, */ + poll:xgi_kern_poll, + ioctl:xgi_kern_ioctl, + mmap:xgi_kern_mmap, + open:xgi_kern_open, + release:xgi_kern_release, +}; +#endif + +static struct file_operations xgi_fops = { + .owner = THIS_MODULE, + .poll = xgi_kern_poll, + .ioctl = xgi_kern_ioctl, + .mmap = xgi_kern_mmap, + .open = xgi_kern_open, + .release = xgi_kern_release, +}; + +static xgi_file_private_t *xgi_alloc_file_private(void) +{ + xgi_file_private_t *fp; + + XGI_KMALLOC(fp, sizeof(xgi_file_private_t)); + if (!fp) + return NULL; + + memset(fp, 0, sizeof(xgi_file_private_t)); + + /* initialize this 
file's event queue */ + init_waitqueue_head(&fp->wait_queue); + + xgi_init_lock(fp->fp_lock); + + return fp; +} + +static void xgi_free_file_private(xgi_file_private_t * fp) +{ + if (fp == NULL) + return; + + XGI_KFREE(fp, sizeof(xgi_file_private_t)); +} + +int xgi_kern_open(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = NULL; + int dev_num; + int result = 0, status; + + /* + * the type and num values are only valid if we are not using devfs. + * However, since we use them to retrieve the device pointer, we + * don't need them with devfs as filp->private_data is already + * initialized + */ + filp->private_data = xgi_alloc_file_private(); + if (filp->private_data == NULL) + return -ENOMEM; + + XGI_INFO("filp->private_data %p\n", filp->private_data); + /* + * for control device, just jump to its open routine + * after setting up the private data + */ + if (XGI_IS_CONTROL_DEVICE(inode)) + return xgi_kern_ctl_open(inode, filp); + + /* what device are we talking about? */ + dev_num = XGI_DEVICE_NUMBER(inode); + if (dev_num >= XGI_MAX_DEVICES) { + xgi_free_file_private(filp->private_data); + filp->private_data = NULL; + return -ENODEV; + } + + info = &xgi_devices[dev_num]; + + XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num); + + xgi_down(info->info_sem); + XGI_CHECK_PCI_CONFIG(info); + + XGI_INFO_FROM_FP(filp) = info; + + /* + * map the memory and allocate isr on first open + */ + + if (!(info->flags & XGI_FLAG_OPEN)) { + XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); + + if (info->device_id == 0) { + XGI_INFO("open of nonexistent device %d\n", dev_num); + result = -ENXIO; + goto failed; + } + + /* initialize struct irqaction */ + status = request_irq(info->irq, xgi_kern_isr, + SA_INTERRUPT | SA_SHIRQ, "xgi", + (void *)info); + if (status != 0) { + if (info->irq && (status == -EBUSY)) { + XGI_ERROR + ("Tried to get irq %d, but another driver", + (unsigned int)info->irq); + XGI_ERROR("has it and is not sharing it.\n"); + } + XGI_ERROR("isr request 
failed 0x%x\n", status); + result = -EIO; + goto failed; + } + + /* + * #define DECLARE_TASKLET(name, func, data) \ + * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } + */ + info->tasklet.func = xgi_kern_isr_bh; + info->tasklet.data = (unsigned long)info; + tasklet_enable(&info->tasklet); + + /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ + xgi_cmdlist_initialize(info, 0x100000); + + info->flags |= XGI_FLAG_OPEN; + } + + XGI_ATOMIC_INC(info->use_count); + + failed: + xgi_up(info->info_sem); + + if ((result) && filp->private_data) { + xgi_free_file_private(filp->private_data); + filp->private_data = NULL; + } + + return result; +} + +int xgi_kern_release(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + + XGI_CHECK_PCI_CONFIG(info); + + /* + * for control device, just jump to its open routine + * after setting up the private data + */ + if (XGI_IS_CONTROL_DEVICE(inode)) + return xgi_kern_ctl_close(inode, filp); + + XGI_INFO("Jong-xgi_kern_release on device %d\n", + XGI_DEVICE_NUMBER(inode)); + + xgi_down(info->info_sem); + if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { + + /* + * The usage count for this device has dropped to zero, it can be shut + * down safely; disable its interrupts. + */ + + /* + * Disable this device's tasklet to make sure that no bottom half will + * run with undefined device state. + */ + tasklet_disable(&info->tasklet); + + /* + * Free the IRQ, which may block until all pending interrupt processing + * has completed. 
+ */ + free_irq(info->irq, (void *)info); + + xgi_cmdlist_cleanup(info); + + /* leave INIT flag alone so we don't reinit every time */ + info->flags &= ~XGI_FLAG_OPEN; + } + + xgi_up(info->info_sem); + + if (FILE_PRIVATE(filp)) { + xgi_free_file_private(FILE_PRIVATE(filp)); + FILE_PRIVATE(filp) = NULL; + } + + return 0; +} + +int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) +{ + //struct inode *inode = INODE_FROM_FP(filp); + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + xgi_pcie_block_t *block; + int pages = 0; + unsigned long prot; + + XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n", + vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); + + XGI_CHECK_PCI_CONFIG(info); + + if (XGI_MASK_OFFSET(vma->vm_start) + || XGI_MASK_OFFSET(vma->vm_end)) { + XGI_ERROR("VM: bad mmap range: %lx - %lx\n", + vma->vm_start, vma->vm_end); + return -ENXIO; + } + + pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + + vma->vm_ops = &xgi_vm_ops; + + /* XGI IO(reg) space */ + if (IS_IO_OFFSET + (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + /* mark it as IO so that we don't dump it on core dump */ + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap io space \n"); + } + /* XGI fb space */ + /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */ + else if (IS_FB_OFFSET + (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + // mark it as IO so that we don't dump it on core dump + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap fb space \n"); + } + /* PCIE allocator */ + /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */ + else if 
(IS_PCIE_OFFSET + (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { + xgi_down(info->pcie_sem); + + block = + (xgi_pcie_block_t *) xgi_find_pcie_block(info, + XGI_VMA_OFFSET + (vma)); + + if (block == NULL) { + XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); + xgi_up(info->pcie_sem); + return -EAGAIN; + } + + if (block->page_count != pages) { + XGI_ERROR + ("pre-allocated PCIE memory has wrong number of pages!\n"); + xgi_up(info->pcie_sem); + return -EAGAIN; + } + + vma->vm_private_data = block; + XGI_ATOMIC_INC(block->use_count); + xgi_up(info->pcie_sem); + + /* + * prevent the swapper from swapping it out + * mark the memory i/o so the buffers aren't + * dumped on core dumps */ + vma->vm_flags |= (VM_LOCKED | VM_IO); + + /* un-cached */ + prot = pgprot_val(vma->vm_page_prot); + /* + if (boot_cpu_data.x86 > 3) + prot |= _PAGE_PCD | _PAGE_PWT; + */ + vma->vm_page_prot = __pgprot(prot); + + XGI_INFO("VM: mmap pcie space \n"); + } +#if 0 + else if (IS_FB_OFFSET + (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (XGI_REMAP_PAGE_RANGE(vma->vm_start, + XGI_VMA_OFFSET(vma), + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; + + // mark it as IO so that we don't dump it on core dump + vma->vm_flags |= VM_IO; + XGI_INFO("VM: mmap fb space \n"); + } +#endif + else { + vma->vm_flags |= (VM_IO | VM_LOCKED); + XGI_ERROR("VM: mmap wrong range \n"); + } + + vma->vm_file = filp; + + return 0; +} + +unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) +{ + xgi_file_private_t *fp; + xgi_info_t *info; + unsigned int mask = 0; + unsigned long eflags; + + info = XGI_INFO_FROM_FP(filp); + + if (info->device_number == XGI_CONTROL_DEVICE_NUMBER) + return xgi_kern_ctl_poll(filp, wait); + + fp = XGI_GET_FP(filp); + + if (!(filp->f_flags & O_NONBLOCK)) { + /* add us to the list */ + poll_wait(filp, &fp->wait_queue, wait); + } + + 
xgi_lock_irqsave(fp->fp_lock, eflags); + + /* wake the user on any event */ + if (fp->num_events) { + XGI_INFO("Hey, an event occured!\n"); + /* + * trigger the client, when they grab the event, + * we'll decrement the event count + */ + mask |= (POLLPRI | POLLIN); + } + xgi_unlock_irqsave(fp->fp_lock, eflags); + + return mask; +} + +int xgi_kern_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + xgi_info_t *info; + xgi_mem_alloc_t *alloc = NULL; + + int status = 0; + void *arg_copy; + int arg_size; + int err = 0; + + info = XGI_INFO_FROM_FP(filp); + + XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), + _IOC_NR(cmd), arg, _IOC_SIZE(cmd)); + /* + * extract the type and number bitfields, and don't decode + * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() + */ + if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) + return -ENOTTY; + if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) + return -ENOTTY; + + /* + * the direction is a bitmask, and VERIFY_WRITE catches R/W + * transfers. `Type' is user-oriented, while + * access_ok is kernel-oriented, so the concept of "read" and + * "write" is reversed + */ + if (_IOC_DIR(cmd) & _IOC_READ) { + err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); + } else if (_IOC_DIR(cmd) & _IOC_WRITE) { + err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); + } + if (err) + return -EFAULT; + + XGI_CHECK_PCI_CONFIG(info); + + arg_size = _IOC_SIZE(cmd); + XGI_KMALLOC(arg_copy, arg_size); + if (arg_copy == NULL) { + XGI_ERROR("failed to allocate ioctl memory\n"); + return -ENOMEM; + } + + /* Jong 05/25/2006 */ + /* copy_from_user(arg_copy, (void *)arg, arg_size); */ + if (copy_from_user(arg_copy, (void *)arg, arg_size)) { + XGI_ERROR("failed to copyin ioctl data\n"); + XGI_INFO("Jong-copy_from_user-fail! \n"); + } else + XGI_INFO("Jong-copy_from_user-OK! 
\n"); + + alloc = (xgi_mem_alloc_t *) arg_copy; + XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, + arg_size); + + switch (_IOC_NR(cmd)) { + case XGI_ESC_DEVICE_INFO: + XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); + xgi_get_device_info(info, (struct xgi_chip_info_s *)arg_copy); + break; + case XGI_ESC_POST_VBIOS: + XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); + break; + case XGI_ESC_FB_ALLOC: + XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); + xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc); + break; + case XGI_ESC_FB_FREE: + XGI_INFO("Jong-xgi_ioctl_fb_free \n"); + xgi_fb_free(info, *(unsigned long *)arg_copy); + break; + case XGI_ESC_MEM_COLLECT: + XGI_INFO("Jong-xgi_ioctl_mem_collect \n"); + xgi_mem_collect(info, (unsigned int *)arg_copy); + break; + case XGI_ESC_PCIE_ALLOC: + XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); + xgi_pcie_alloc(info, ((xgi_mem_req_t *) arg_copy)->size, + ((xgi_mem_req_t *) arg_copy)->owner, alloc); + break; + case XGI_ESC_PCIE_FREE: + XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", + *((unsigned long *)arg_copy)); + xgi_pcie_free(info, *((unsigned long *)arg_copy)); + break; + case XGI_ESC_PCIE_CHECK: + XGI_INFO("Jong-xgi_pcie_heap_check \n"); + xgi_pcie_heap_check(); + break; + case XGI_ESC_GET_SCREEN_INFO: + XGI_INFO("Jong-xgi_get_screen_info \n"); + xgi_get_screen_info(info, (struct xgi_screen_info_s *)arg_copy); + break; + case XGI_ESC_PUT_SCREEN_INFO: + XGI_INFO("Jong-xgi_put_screen_info \n"); + xgi_put_screen_info(info, (struct xgi_screen_info_s *)arg_copy); + break; + case XGI_ESC_MMIO_INFO: + XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); + xgi_get_mmio_info(info, (struct xgi_mmio_info_s *)arg_copy); + break; + case XGI_ESC_GE_RESET: + XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); + xgi_ge_reset(info); + break; + case XGI_ESC_SAREA_INFO: + XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); + xgi_sarea_info(info, (struct xgi_sarea_info_s *)arg_copy); + break; + case XGI_ESC_DUMP_REGISTER: + 
XGI_INFO("Jong-xgi_ioctl_dump_register \n"); + xgi_dump_register(info); + break; + case XGI_ESC_DEBUG_INFO: + XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); + xgi_restore_registers(info); + //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + break; + case XGI_ESC_SUBMIT_CMDLIST: + XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); + xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy); + break; + case XGI_ESC_TEST_RWINKERNEL: + XGI_INFO("Jong-xgi_test_rwinkernel \n"); + xgi_test_rwinkernel(info, *(unsigned long *)arg_copy); + break; + case XGI_ESC_STATE_CHANGE: + XGI_INFO("Jong-xgi_state_change \n"); + xgi_state_change(info, (xgi_state_info_t *) arg_copy); + break; + case XGI_ESC_CPUID: + XGI_INFO("Jong-XGI_ESC_CPUID \n"); + xgi_get_cpu_id((struct cpu_info_s *)arg_copy); + break; + default: + XGI_INFO("Jong-xgi_ioctl_default \n"); + status = -EINVAL; + break; + } + + if (copy_to_user((void *)arg, arg_copy, arg_size)) { + XGI_ERROR("failed to copyout ioctl data\n"); + XGI_INFO("Jong-copy_to_user-fail! \n"); + } else + XGI_INFO("Jong-copy_to_user-OK! 
\n"); + + XGI_KFREE(arg_copy, arg_size); + return status; +} + +/* + * xgi control driver operations defined here + */ +int xgi_kern_ctl_open(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = &xgi_ctl_device; + + int rc = 0; + + XGI_INFO("Jong-xgi_kern_ctl_open\n"); + + xgi_down(info->info_sem); + info->device_number = XGI_CONTROL_DEVICE_NUMBER; + + /* save the xgi info in file->private_data */ + filp->private_data = info; + + if (XGI_ATOMIC_READ(info->use_count) == 0) { + init_waitqueue_head(&xgi_ctl_waitqueue); + } + + info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL; + + XGI_ATOMIC_INC(info->use_count); + xgi_up(info->info_sem); + + return rc; +} + +int xgi_kern_ctl_close(struct inode *inode, struct file *filp) +{ + xgi_info_t *info = XGI_INFO_FROM_FP(filp); + + XGI_INFO("Jong-xgi_kern_ctl_close\n"); + + xgi_down(info->info_sem); + if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { + info->flags = 0; + } + xgi_up(info->info_sem); + + if (FILE_PRIVATE(filp)) { + xgi_free_file_private(FILE_PRIVATE(filp)); + FILE_PRIVATE(filp) = NULL; + } + + return 0; +} + +unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait) +{ + //xgi_info_t *info = XGI_INFO_FROM_FP(filp);; + unsigned int ret = 0; + + if (!(filp->f_flags & O_NONBLOCK)) { + poll_wait(filp, &xgi_ctl_waitqueue, wait); + } + + return ret; +} + +/* + * xgi proc system + */ +static u8 xgi_find_pcie_capability(struct pci_dev *dev) +{ + u16 status; + u8 cap_ptr, cap_id; + + pci_read_config_word(dev, PCI_STATUS, &status); + status &= PCI_STATUS_CAP_LIST; + if (!status) + return 0; + + switch (dev->hdr_type) { + case PCI_HEADER_TYPE_NORMAL: + case PCI_HEADER_TYPE_BRIDGE: + pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr); + break; + default: + return 0; + } + + do { + cap_ptr &= 0xFC; + pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); + pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, + &cap_ptr); + } while (cap_ptr && cap_id != 0xFF); + + return 0; +} + +static struct 
pci_dev *xgi_get_pci_device(xgi_info_t * info) +{ + struct pci_dev *dev; + + dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL); + while (dev) { + if (XGI_PCI_SLOT_NUMBER(dev) == info->slot + && XGI_PCI_BUS_NUMBER(dev) == info->bus) + return dev; + dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev); + } + + return NULL; +} + +int xgi_kern_read_card_info(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct pci_dev *dev; + char *type; + int len = 0; + + xgi_info_t *info; + info = (xgi_info_t *) data; + + dev = xgi_get_pci_device(info); + if (!dev) + return 0; + + type = xgi_find_pcie_capability(dev) ? "PCIE" : "PCI"; + len += sprintf(page + len, "Card Type: \t %s\n", type); + + XGI_PCI_DEV_PUT(dev); + return len; +} + +int xgi_kern_read_version(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = 0; + + len += sprintf(page + len, "XGI version: %s\n", "1.0"); + len += sprintf(page + len, "GCC version: %s\n", "3.0"); + + return len; +} + +int xgi_kern_read_pcie_info(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return 0; +} + +int xgi_kern_read_status(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + return 0; +} + +static void xgi_proc_create(void) +{ +#ifdef CONFIG_PROC_FS + + struct pci_dev *dev; + int i = 0; + char name[6]; + + struct proc_dir_entry *entry; + struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; + + xgi_info_t *info; + xgi_info_t *xgi_max_devices; + + /* world readable directory */ + int flags = S_IFDIR | S_IRUGO | S_IXUGO; + + proc_xgi = create_proc_entry("xgi", flags, proc_root_driver); + if (!proc_xgi) + goto failed; + + proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi); + if (!proc_xgi_cards) + goto failed; + + proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi); + if (!proc_xgi_pcie) + goto failed; + + /* + * Set the module owner to ensure that the reference + * count reflects 
accesses to the proc files. + */ + proc_xgi->owner = THIS_MODULE; + proc_xgi_cards->owner = THIS_MODULE; + proc_xgi_pcie->owner = THIS_MODULE; + + xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; + for (info = xgi_devices; info < xgi_max_devices; info++) { + if (info->device_id == 0) + break; + + /* world readable file */ + flags = S_IFREG | S_IRUGO; + + dev = xgi_get_pci_device(info); + if (!dev) + break; + + sprintf(name, "%d", i++); + entry = create_proc_entry(name, flags, proc_xgi_cards); + if (!entry) { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_card_info; + entry->owner = THIS_MODULE; + + if (xgi_find_pcie_capability(dev)) { + entry = + create_proc_entry("status", flags, proc_xgi_pcie); + if (!entry) { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_status; + entry->owner = THIS_MODULE; + + entry = create_proc_entry("card", flags, proc_xgi_pcie); + if (!entry) { + XGI_PCI_DEV_PUT(dev); + goto failed; + } + + entry->data = info; + entry->read_proc = xgi_kern_read_pcie_info; + entry->owner = THIS_MODULE; + } + + XGI_PCI_DEV_PUT(dev); + } + + entry = create_proc_entry("version", flags, proc_xgi); + if (!entry) + goto failed; + + entry->read_proc = xgi_kern_read_version; + entry->owner = THIS_MODULE; + + entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie); + if (!entry) + goto failed; + + entry->data = NULL; + entry->read_proc = xgi_kern_read_pcie_info; + entry->owner = THIS_MODULE; + + return; + + failed: + XGI_ERROR("failed to create /proc entries!\n"); + xgi_proc_remove_all(proc_xgi); +#endif +} + +#ifdef CONFIG_PROC_FS +static void xgi_proc_remove_all(struct proc_dir_entry *entry) +{ + while (entry) { + struct proc_dir_entry *next = entry->next; + if (entry->subdir) + xgi_proc_remove_all(entry->subdir); + remove_proc_entry(entry->name, entry->parent); + if (entry == proc_xgi) + break; + entry = next; + } +} +#endif + +static void 
xgi_proc_remove(void) +{ +#ifdef CONFIG_PROC_FS + xgi_proc_remove_all(proc_xgi); +#endif +} + +/* + * driver receives an interrupt if someone waiting, then hand it off. + */ +irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) +{ + xgi_info_t *info = (xgi_info_t *) dev_id; + u32 need_to_run_bottom_half = 0; + + //XGI_INFO("xgi_kern_isr \n"); + + //XGI_CHECK_PCI_CONFIG(info); + + //xgi_dvi_irq_handler(info); + + if (need_to_run_bottom_half) { + tasklet_schedule(&info->tasklet); + } + + return IRQ_HANDLED; +} + +void xgi_kern_isr_bh(unsigned long data) +{ + xgi_info_t *info = (xgi_info_t *) data; + + XGI_INFO("xgi_kern_isr_bh \n"); + + //xgi_dvi_irq_handler(info); + + XGI_CHECK_PCI_CONFIG(info); +} + +static void xgi_lock_init(xgi_info_t * info) +{ + if (info == NULL) + return; + + spin_lock_init(&info->info_lock); + + sema_init(&info->info_sem, 1); + sema_init(&info->fb_sem, 1); + sema_init(&info->pcie_sem, 1); + + XGI_ATOMIC_SET(info->use_count, 0); +} + +static void xgi_dev_init(xgi_info_t * info) +{ + struct pci_dev *pdev = NULL; + struct xgi_dev *dev; + int found = 0; + u16 pci_cmd; + + XGI_INFO("Enter xgi_dev_init \n"); + + //XGI_PCI_FOR_EACH_DEV(pdev) + { + for (dev = xgidev_list; dev->vendor; dev++) { + if ((dev->vendor == pdev->vendor) + && (dev->device == pdev->device)) { + XGI_INFO("dev->vendor = pdev->vendor= %x \n", + dev->vendor); + XGI_INFO("dev->device = pdev->device= %x \n", + dev->device); + + xgi_devices[found].device_id = pdev->device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, + &xgi_devices[found]. 
+ revision_id); + + XGI_INFO("PCI_REVISION_ID= %x \n", + xgi_devices[found].revision_id); + + pci_read_config_word(pdev, PCI_COMMAND, + &pci_cmd); + + XGI_INFO("PCI_COMMAND = %x \n", pci_cmd); + + break; + } + } + } +} + +/* + * Export to Linux Kernel + */ + +static int __init xgi_init_module(void) +{ + xgi_info_t *info = &xgi_devices[xgi_num_devices]; + int i, result; + + XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION); + //SET_MODULE_OWNER(&xgi_fops); + + memset(xgi_devices, 0, sizeof(xgi_devices)); + + if (pci_register_driver(&xgi_pci_driver) < 0) { + pci_unregister_driver(&xgi_pci_driver); + XGI_ERROR("no XGI graphics adapter found\n"); + return -ENODEV; + } + + XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, + xgi_devices[xgi_num_devices].fb.base); + XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, + xgi_devices[xgi_num_devices].fb.size); + +/* Jong 07/27/2006; test for ubuntu */ +/* +#ifdef CONFIG_DEVFS_FS + + XGI_INFO("Jong-Use devfs \n"); + do + { + xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0); + if (xgi_devfs_handles[0] == NULL) + { + result = -ENOMEM; + XGI_ERROR("devfs register failed\n"); + goto failed; + } + } while(0); + #else *//* no devfs, do it the "classic" way */ + + XGI_INFO("Jong-Use non-devfs \n"); + /* + * Register your major, and accept a dynamic number. 
This is the + * first thing to do, in order to avoid releasing other module's + * fops in scull_cleanup_module() + */ + result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops); + if (result < 0) { + XGI_ERROR("register chrdev failed\n"); + pci_unregister_driver(&xgi_pci_driver); + return result; + } + if (xgi_major == 0) + xgi_major = result; /* dynamic */ + + /* #endif *//* CONFIG_DEVFS_FS */ + + XGI_INFO("Jong-major number %d\n", xgi_major); + + /* instantiate tasklets */ + for (i = 0; i < XGI_MAX_DEVICES; i++) { + /* + * We keep one tasklet per card to avoid latency issues with more + * than one device; no two instances of a single tasklet are ever + * executed concurrently. + */ + XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1); + } + + /* init the xgi control device */ + { + xgi_info_t *info_ctl = &xgi_ctl_device; + xgi_lock_init(info_ctl); + } + + /* Init the resource manager */ + INIT_LIST_HEAD(&xgi_mempid_list); + if (!xgi_fb_heap_init(info)) { + XGI_ERROR("xgi_fb_heap_init() failed\n"); + result = -EIO; + goto failed; + } + + /* Init the resource manager */ + if (!xgi_pcie_heap_init(info)) { + XGI_ERROR("xgi_pcie_heap_init() failed\n"); + result = -EIO; + goto failed; + } + + /* create /proc/driver/xgi */ + xgi_proc_create(); + +#if defined(DEBUG) + inter_module_register("xgi_devices", THIS_MODULE, xgi_devices); +#endif + + return 0; + + failed: +#ifdef CONFIG_DEVFS_FS + XGI_DEVFS_REMOVE_CONTROL(); + XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + + if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) + XGI_ERROR("unregister xgi chrdev failed\n"); + + for (i = 0; i < xgi_num_devices; i++) { + if (xgi_devices[i].dev) { + release_mem_region(xgi_devices[i].fb.base, + xgi_devices[i].fb.size); + release_mem_region(xgi_devices[i].mmio.base, + xgi_devices[i].mmio.size); + } + } + + pci_unregister_driver(&xgi_pci_driver); + return result; + + return 1; +} + +void __exit xgi_exit_module(void) +{ + int i; + xgi_info_t *info, *max_devices; + +#ifdef CONFIG_DEVFS_FS 
+ /* + XGI_DEVFS_REMOVE_CONTROL(); + for (i = 0; i < XGI_MAX_DEVICES; i++) + XGI_DEVFS_REMOVE_DEVICE(i); + */ + XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + + if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) + XGI_ERROR("unregister xgi chrdev failed\n"); + + XGI_INFO("Jong-unregister xgi chrdev scceeded\n"); + for (i = 0; i < XGI_MAX_DEVICES; i++) { + if (xgi_devices[i].dev) { + /* clean up the flush2D batch array */ + xgi_cmdlist_cleanup(&xgi_devices[i]); + + if (xgi_devices[i].fb.vbase != NULL) { + iounmap((void *)xgi_devices[i].fb.vbase); + xgi_devices[i].fb.vbase = NULL; + } + if (xgi_devices[i].mmio.vbase != NULL) { + iounmap((void *)xgi_devices[i].mmio.vbase); + xgi_devices[i].mmio.vbase = NULL; + } + //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); + //XGI_INFO("release frame buffer mem region scceeded\n"); + + release_mem_region(xgi_devices[i].mmio.base, + xgi_devices[i].mmio.size); + XGI_INFO("release MMIO mem region scceeded\n"); + + xgi_fb_heap_cleanup(&xgi_devices[i]); + XGI_INFO("xgi_fb_heap_cleanup scceeded\n"); + + xgi_pcie_heap_cleanup(&xgi_devices[i]); + XGI_INFO("xgi_pcie_heap_cleanup scceeded\n"); + + XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev); + } + } + + pci_unregister_driver(&xgi_pci_driver); + + /* remove /proc/driver/xgi */ + xgi_proc_remove(); + +#if defined(DEBUG) + inter_module_unregister("xgi_devices"); +#endif +} + +module_init(xgi_init_module); +module_exit(xgi_exit_module); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_acpi_event(struct pci_dev *dev, u32 state) +{ + return 1; +} + +int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state) +{ + return 1; +} + +int xgi_kern_acpi_resume(struct pci_dev *dev) +{ + return 1; +} +#endif + +MODULE_AUTHOR("Andrea Zhang "); +MODULE_DESCRIPTION("xgi kernel driver for xgi cards"); +MODULE_LICENSE("GPL"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 568a7af1..429719a7 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -1,364 +1,364 @@ - 
-/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_DRV_H_ -#define _XGI_DRV_H_ - -#define XGI_MAJOR_VERSION 0 -#define XGI_MINOR_VERSION 7 -#define XGI_PATCHLEVEL 5 - -#define XGI_DRV_VERSION "0.7.5" - -#ifndef XGI_DRV_NAME -#define XGI_DRV_NAME "xgi" -#endif - -/* - * xgi reserved major device number, Set this to 0 to - * request dynamic major number allocation. 
- */ -#ifndef XGI_DEV_MAJOR -#define XGI_DEV_MAJOR 0 -#endif - -#ifndef XGI_MAX_DEVICES -#define XGI_MAX_DEVICES 1 -#endif - -/* Jong 06/06/2006 */ -/* #define XGI_DEBUG */ - -#ifndef PCI_VENDOR_ID_XGI -/* -#define PCI_VENDOR_ID_XGI 0x1023 -*/ -#define PCI_VENDOR_ID_XGI 0x18CA - -#endif - -#ifndef PCI_DEVICE_ID_XP5 -#define PCI_DEVICE_ID_XP5 0x2200 -#endif - -#ifndef PCI_DEVICE_ID_XG47 -#define PCI_DEVICE_ID_XG47 0x0047 -#endif - -/* Macros to make printk easier */ -#define XGI_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) - -#define XGI_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) - -/* #define XGI_DEBUG */ - -#ifdef XGI_DEBUG -#define XGI_INFO(fmt, arg...) \ - printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) -/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */ -#else -#define XGI_INFO(fmt, arg...) do { } while (0) -#endif - -/* device name length; must be atleast 8 */ -#define XGI_DEVICE_NAME_LENGTH 40 - -/* need a fake device number for control device; just to flag it for msgs */ -#define XGI_CONTROL_DEVICE_NUMBER 100 - -typedef struct { - U32 base; // pcie base is different from fb base - U32 size; - U8 *vbase; -} xgi_aperture_t; - -typedef struct xgi_screen_info_s { - U32 scrn_start; - U32 scrn_xres; - U32 scrn_yres; - U32 scrn_bpp; - U32 scrn_pitch; -} xgi_screen_info_t; - -typedef struct xgi_sarea_info_s { - U32 bus_addr; - U32 size; -} xgi_sarea_info_t; - -typedef struct xgi_info_s { - struct pci_dev *dev; - int flags; - int device_number; - int bus; /* PCI config info */ - int slot; - int vendor_id; - U32 device_id; - U8 revision_id; - - /* physical characteristics */ - xgi_aperture_t mmio; - xgi_aperture_t fb; - xgi_aperture_t pcie; - xgi_screen_info_t scrn_info; - xgi_sarea_info_t sarea_info; - - /* look up table parameters */ - U32 *lut_base; - U32 lutPageSize; - U32 lutPageOrder; - U32 isLUTInLFB; 
- U32 sdfbPageSize; - - U32 pcie_config; - U32 pcie_status; - U32 irq; - - atomic_t use_count; - - /* keep track of any pending bottom halfes */ - struct tasklet_struct tasklet; - - spinlock_t info_lock; - - struct semaphore info_sem; - struct semaphore fb_sem; - struct semaphore pcie_sem; -} xgi_info_t; - -typedef struct xgi_ioctl_post_vbios { - U32 bus; - U32 slot; -} xgi_ioctl_post_vbios_t; - -typedef enum xgi_mem_location_s -{ - NON_LOCAL = 0, - LOCAL = 1, - INVALID = 0x7fffffff -} xgi_mem_location_t; - -enum PcieOwner -{ - PCIE_2D = 0, - /* - PCIE_3D should not begin with 1, - 2D alloc pcie memory will use owner 1. - */ - PCIE_3D = 11,/*vetex buf*/ - PCIE_3D_CMDLIST = 12, - PCIE_3D_SCRATCHPAD = 13, - PCIE_3D_TEXTURE = 14, - PCIE_INVALID = 0x7fffffff -}; - -typedef struct xgi_mem_req_s { - xgi_mem_location_t location; - unsigned long size; - unsigned long is_front; - enum PcieOwner owner; - unsigned long pid; -} xgi_mem_req_t; - -typedef struct xgi_mem_alloc_s { - xgi_mem_location_t location; - unsigned long size; - unsigned long bus_addr; - unsigned long hw_addr; - unsigned long pid; -} xgi_mem_alloc_t; - -typedef struct xgi_chip_info_s { - U32 device_id; - char device_name[32]; - U32 vendor_id; - U32 curr_display_mode; //Singe, DualView(Contained), MHS - U32 fb_size; - U32 sarea_bus_addr; - U32 sarea_size; -} xgi_chip_info_t; - -typedef struct xgi_opengl_cmd_s { - U32 cmd; -} xgi_opengl_cmd_t; - -typedef struct xgi_mmio_info_s { - xgi_opengl_cmd_t cmd_head; - void *mmioBase; - int size; -} xgi_mmio_info_t; - -typedef enum { - BTYPE_2D = 0, - BTYPE_3D = 1, - BTYPE_FLIP = 2, - BTYPE_CTRL = 3, - BTYPE_NONE = 0x7fffffff -}BATCH_TYPE; - -typedef struct xgi_cmd_info_s { - BATCH_TYPE _firstBeginType; - U32 _firstBeginAddr; - U32 _firstSize; - U32 _curDebugID; - U32 _lastBeginAddr; - U32 _beginCount; -} xgi_cmd_info_t; - -typedef struct xgi_state_info_s { - U32 _fromState; - U32 _toState; -} xgi_state_info_t; - -typedef struct cpu_info_s { - U32 _eax; - U32 _ebx; - 
U32 _ecx; - U32 _edx; -} cpu_info_t; - -typedef struct xgi_mem_pid_s { - struct list_head list; - xgi_mem_location_t location; - unsigned long bus_addr; - unsigned long pid; -} xgi_mem_pid_t; - -/* - * Ioctl definitions - */ - -#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ - -#define XGI_IOCTL_BASE 0 -#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) -#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) - -#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) -#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) -#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) -#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) -#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) -#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) -#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) -#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) -#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) -#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) -#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) -#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) -#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) -#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) -#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) -#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) -#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) -#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) -#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) - -#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t) -#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) - -#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) -#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t) -#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) - -#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t) -#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned 
long) - -#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t) -#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t) - -#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) -#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t) -#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) -#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) -#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t) - -#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t) -#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) -#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t) - -#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) -#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t) -#define XGI_IOCTL_MAXNR 30 - -/* - * flags - */ -#define XGI_FLAG_OPEN 0x0001 -#define XGI_FLAG_NEEDS_POSTING 0x0002 -#define XGI_FLAG_WAS_POSTED 0x0004 -#define XGI_FLAG_CONTROL 0x0010 -#define XGI_FLAG_MAP_REGS_EARLY 0x0200 - -/* mmap(2) offsets */ - -#define IS_IO_OFFSET(info, offset, length) \ - (((offset) >= (info)->mmio.base) \ - && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size)) - -/* Jong 06/14/2006 */ -/* (info)->fb.base is a base address for physical (bus) address space */ -/* what's the definition of offest? 
on physical (bus) address space or HW address space */ -/* Jong 06/15/2006; use HW address space */ -#define IS_FB_OFFSET(info, offset, length) \ - (((offset) >= 0) \ - && (((offset) + (length)) <= (info)->fb.size)) -#if 0 -#define IS_FB_OFFSET(info, offset, length) \ - (((offset) >= (info)->fb.base) \ - && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size)) -#endif - -#define IS_PCIE_OFFSET(info, offset, length) \ - (((offset) >= (info)->pcie.base) \ - && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) - -extern int xgi_fb_heap_init(xgi_info_t *info); -extern void xgi_fb_heap_cleanup(xgi_info_t *info); - -extern void xgi_fb_alloc(xgi_info_t *info, xgi_mem_req_t *req, xgi_mem_alloc_t *alloc); -extern void xgi_fb_free(xgi_info_t *info, unsigned long offset); -extern void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt); - -extern int xgi_pcie_heap_init(xgi_info_t *info); -extern void xgi_pcie_heap_cleanup(xgi_info_t *info); - -extern void xgi_pcie_alloc(xgi_info_t *info, unsigned long size, enum PcieOwner owner, xgi_mem_alloc_t *alloc); -extern void xgi_pcie_free(xgi_info_t *info, unsigned long offset); -extern void xgi_pcie_heap_check(void); -extern void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address); -extern void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address); - -extern void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req); -extern void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req); - -extern void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address); - -#endif + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_DRV_H_ +#define _XGI_DRV_H_ + +#define XGI_MAJOR_VERSION 0 +#define XGI_MINOR_VERSION 7 +#define XGI_PATCHLEVEL 5 + +#define XGI_DRV_VERSION "0.7.5" + +#ifndef XGI_DRV_NAME +#define XGI_DRV_NAME "xgi" +#endif + +/* + * xgi reserved major device number, Set this to 0 to + * request dynamic major number allocation. 
+ */ +#ifndef XGI_DEV_MAJOR +#define XGI_DEV_MAJOR 0 +#endif + +#ifndef XGI_MAX_DEVICES +#define XGI_MAX_DEVICES 1 +#endif + +/* Jong 06/06/2006 */ +/* #define XGI_DEBUG */ + +#ifndef PCI_VENDOR_ID_XGI +/* +#define PCI_VENDOR_ID_XGI 0x1023 +*/ +#define PCI_VENDOR_ID_XGI 0x18CA + +#endif + +#ifndef PCI_DEVICE_ID_XP5 +#define PCI_DEVICE_ID_XP5 0x2200 +#endif + +#ifndef PCI_DEVICE_ID_XG47 +#define PCI_DEVICE_ID_XG47 0x0047 +#endif + +/* Macros to make printk easier */ +#define XGI_ERROR(fmt, arg...) \ + printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +#define XGI_MEM_ERROR(area, fmt, arg...) \ + printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +/* #define XGI_DEBUG */ + +#ifdef XGI_DEBUG +#define XGI_INFO(fmt, arg...) \ + printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) +/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */ +#else +#define XGI_INFO(fmt, arg...) do { } while (0) +#endif + +/* device name length; must be atleast 8 */ +#define XGI_DEVICE_NAME_LENGTH 40 + +/* need a fake device number for control device; just to flag it for msgs */ +#define XGI_CONTROL_DEVICE_NUMBER 100 + +typedef struct { + U32 base; // pcie base is different from fb base + U32 size; + U8 *vbase; +} xgi_aperture_t; + +typedef struct xgi_screen_info_s { + U32 scrn_start; + U32 scrn_xres; + U32 scrn_yres; + U32 scrn_bpp; + U32 scrn_pitch; +} xgi_screen_info_t; + +typedef struct xgi_sarea_info_s { + U32 bus_addr; + U32 size; +} xgi_sarea_info_t; + +typedef struct xgi_info_s { + struct pci_dev *dev; + int flags; + int device_number; + int bus; /* PCI config info */ + int slot; + int vendor_id; + U32 device_id; + U8 revision_id; + + /* physical characteristics */ + xgi_aperture_t mmio; + xgi_aperture_t fb; + xgi_aperture_t pcie; + xgi_screen_info_t scrn_info; + xgi_sarea_info_t sarea_info; + + /* look up table parameters */ + U32 *lut_base; + U32 lutPageSize; + U32 lutPageOrder; + U32 isLUTInLFB; 
+ U32 sdfbPageSize; + + U32 pcie_config; + U32 pcie_status; + U32 irq; + + atomic_t use_count; + + /* keep track of any pending bottom halfes */ + struct tasklet_struct tasklet; + + spinlock_t info_lock; + + struct semaphore info_sem; + struct semaphore fb_sem; + struct semaphore pcie_sem; +} xgi_info_t; + +typedef struct xgi_ioctl_post_vbios { + U32 bus; + U32 slot; +} xgi_ioctl_post_vbios_t; + +typedef enum xgi_mem_location_s { + NON_LOCAL = 0, + LOCAL = 1, + INVALID = 0x7fffffff +} xgi_mem_location_t; + +enum PcieOwner { + PCIE_2D = 0, + /* + PCIE_3D should not begin with 1, + 2D alloc pcie memory will use owner 1. + */ + PCIE_3D = 11, /*vetex buf */ + PCIE_3D_CMDLIST = 12, + PCIE_3D_SCRATCHPAD = 13, + PCIE_3D_TEXTURE = 14, + PCIE_INVALID = 0x7fffffff +}; + +typedef struct xgi_mem_req_s { + xgi_mem_location_t location; + unsigned long size; + unsigned long is_front; + enum PcieOwner owner; + unsigned long pid; +} xgi_mem_req_t; + +typedef struct xgi_mem_alloc_s { + xgi_mem_location_t location; + unsigned long size; + unsigned long bus_addr; + unsigned long hw_addr; + unsigned long pid; +} xgi_mem_alloc_t; + +typedef struct xgi_chip_info_s { + U32 device_id; + char device_name[32]; + U32 vendor_id; + U32 curr_display_mode; //Singe, DualView(Contained), MHS + U32 fb_size; + U32 sarea_bus_addr; + U32 sarea_size; +} xgi_chip_info_t; + +typedef struct xgi_opengl_cmd_s { + U32 cmd; +} xgi_opengl_cmd_t; + +typedef struct xgi_mmio_info_s { + xgi_opengl_cmd_t cmd_head; + void *mmioBase; + int size; +} xgi_mmio_info_t; + +typedef enum { + BTYPE_2D = 0, + BTYPE_3D = 1, + BTYPE_FLIP = 2, + BTYPE_CTRL = 3, + BTYPE_NONE = 0x7fffffff +} BATCH_TYPE; + +typedef struct xgi_cmd_info_s { + BATCH_TYPE _firstBeginType; + U32 _firstBeginAddr; + U32 _firstSize; + U32 _curDebugID; + U32 _lastBeginAddr; + U32 _beginCount; +} xgi_cmd_info_t; + +typedef struct xgi_state_info_s { + U32 _fromState; + U32 _toState; +} xgi_state_info_t; + +typedef struct cpu_info_s { + U32 _eax; + U32 _ebx; + 
U32 _ecx; + U32 _edx; +} cpu_info_t; + +typedef struct xgi_mem_pid_s { + struct list_head list; + xgi_mem_location_t location; + unsigned long bus_addr; + unsigned long pid; +} xgi_mem_pid_t; + +/* + * Ioctl definitions + */ + +#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ + +#define XGI_IOCTL_BASE 0 +#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) +#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) + +#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) +#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) +#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) +#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) +#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) +#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) +#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) +#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) +#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) +#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) +#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) +#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) +#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) +#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) +#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) +#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) +#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) +#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) +#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) + +#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t) +#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) + +#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) + +#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned 
long) + +#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t) +#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t) + +#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) +#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t) +#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) +#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) +#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t) + +#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t) +#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) +#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t) + +#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) +#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t) +#define XGI_IOCTL_MAXNR 30 + +/* + * flags + */ +#define XGI_FLAG_OPEN 0x0001 +#define XGI_FLAG_NEEDS_POSTING 0x0002 +#define XGI_FLAG_WAS_POSTED 0x0004 +#define XGI_FLAG_CONTROL 0x0010 +#define XGI_FLAG_MAP_REGS_EARLY 0x0200 + +/* mmap(2) offsets */ + +#define IS_IO_OFFSET(info, offset, length) \ + (((offset) >= (info)->mmio.base) \ + && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size)) + +/* Jong 06/14/2006 */ +/* (info)->fb.base is a base address for physical (bus) address space */ +/* what's the definition of offest? 
on physical (bus) address space or HW address space */ +/* Jong 06/15/2006; use HW address space */ +#define IS_FB_OFFSET(info, offset, length) \ + (((offset) >= 0) \ + && (((offset) + (length)) <= (info)->fb.size)) +#if 0 +#define IS_FB_OFFSET(info, offset, length) \ + (((offset) >= (info)->fb.base) \ + && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size)) +#endif + +#define IS_PCIE_OFFSET(info, offset, length) \ + (((offset) >= (info)->pcie.base) \ + && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) + +extern int xgi_fb_heap_init(xgi_info_t * info); +extern void xgi_fb_heap_cleanup(xgi_info_t * info); + +extern void xgi_fb_alloc(xgi_info_t * info, xgi_mem_req_t * req, + xgi_mem_alloc_t * alloc); +extern void xgi_fb_free(xgi_info_t * info, unsigned long offset); +extern void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt); + +extern int xgi_pcie_heap_init(xgi_info_t * info); +extern void xgi_pcie_heap_cleanup(xgi_info_t * info); + +extern void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, + enum PcieOwner owner, xgi_mem_alloc_t * alloc); +extern void xgi_pcie_free(xgi_info_t * info, unsigned long offset); +extern void xgi_pcie_heap_check(void); +extern void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address); +extern void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address); + +extern void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); +extern void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); + +extern void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address); + +#endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 67fdfe17..fab99ae2 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -1,528 +1,491 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_fb.h" - -#define XGI_FB_HEAP_START 0x1000000 - -static xgi_mem_heap_t *xgi_fb_heap; -static kmem_cache_t *xgi_fb_cache_block = NULL; -extern struct list_head xgi_mempid_list; - -static xgi_mem_block_t *xgi_mem_new_node(void); -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long size); -static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset); - -void xgi_fb_alloc(xgi_info_t *info, - xgi_mem_req_t *req, - xgi_mem_alloc_t *alloc) -{ - xgi_mem_block_t *block; - xgi_mem_pid_t *mempid_block; - - if (req->is_front) - { - alloc->location = LOCAL; - alloc->bus_addr = info->fb.base; - alloc->hw_addr = 0; - XGI_INFO("Video RAM allocation on front buffer successfully! \n"); - } - else - { - xgi_down(info->fb_sem); - block = xgi_mem_alloc(info, req->size); - xgi_up(info->fb_sem); - - if (block == NULL) - { - alloc->location = LOCAL; - alloc->size = 0; - alloc->bus_addr = 0; - alloc->hw_addr = 0; - XGI_ERROR("Video RAM allocation failed\n"); - } - else - { - XGI_INFO("Video RAM allocation succeeded: 0x%p\n", - (char *) block->offset); - alloc->location = LOCAL; - alloc->size = block->size; - alloc->bus_addr = info->fb.base + block->offset; - alloc->hw_addr = block->offset; - - /* manage mempid */ - mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); - mempid_block->location = LOCAL; - mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = alloc->pid; - - if (!mempid_block) - XGI_ERROR("mempid_block alloc failed\n"); - - XGI_INFO("Memory ProcessID add one fb block pid:%ld successfully! 
\n", mempid_block->pid); - list_add(&mempid_block->list, &xgi_mempid_list); - } - } -} - -void xgi_fb_free(xgi_info_t *info, unsigned long bus_addr) -{ - xgi_mem_block_t *block; - unsigned long offset = bus_addr - info->fb.base; - xgi_mem_pid_t *mempid_block; - xgi_mem_pid_t *mempid_freeblock = NULL; - struct list_head *mempid_list; - - if (offset < 0) - { - XGI_INFO("free onscreen frame buffer successfully !\n"); - } - else - { - xgi_down(info->fb_sem); - block = xgi_mem_free(info, offset); - xgi_up(info->fb_sem); - - if (block == NULL) - { - XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", offset); - } - - /* manage mempid */ - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) - { - mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); - if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr) - { - mempid_freeblock = mempid_block; - break; - } - mempid_list = mempid_list->next; - } - if (mempid_freeblock) - { - list_del(&mempid_freeblock->list); - XGI_INFO("Memory ProcessID delete one fb block pid:%ld successfully! 
\n", mempid_freeblock->pid); - kfree(mempid_freeblock); - } - } -} - -int xgi_fb_heap_init(xgi_info_t *info) -{ - xgi_mem_block_t *block; - - xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL); - if (!xgi_fb_heap) - { - XGI_ERROR("xgi_fb_heap alloc failed\n"); - return 0; - } - - INIT_LIST_HEAD(&xgi_fb_heap->free_list); - INIT_LIST_HEAD(&xgi_fb_heap->used_list); - INIT_LIST_HEAD(&xgi_fb_heap->sort_list); - - xgi_fb_cache_block = kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); - - if (NULL == xgi_fb_cache_block) - { - XGI_ERROR("Fail to creat xgi_fb_block\n"); - goto fail1; - } - - block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); - if (!block) - { - XGI_ERROR("kmem_cache_alloc failed\n"); - goto fail2; - } - - block->offset = XGI_FB_HEAP_START; - block->size = info->fb.size - XGI_FB_HEAP_START; - - list_add(&block->list, &xgi_fb_heap->free_list); - - xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START; - - XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, block->size); - XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); - - return 1; - -fail2: - if (xgi_fb_cache_block) - { - kmem_cache_destroy(xgi_fb_cache_block); - xgi_fb_cache_block = NULL; - } -fail1: - if(xgi_fb_heap) - { - kfree(xgi_fb_heap); - xgi_fb_heap = NULL; - } - return 0; -} - -void xgi_fb_heap_cleanup(xgi_info_t *info) -{ - struct list_head *free_list, *temp; - xgi_mem_block_t *block; - int i; - - if (xgi_fb_heap) - { - free_list = &xgi_fb_heap->free_list; - for (i = 0; i < 3; i++, free_list++) - { - temp = free_list->next; - while (temp != free_list) - { - block = list_entry(temp, struct xgi_mem_block_s, list); - temp = temp->next; - - XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n", - i, block->offset, block->size); - //XGI_INFO("No. 
%d free block: 0x%p \n", i, block); - kmem_cache_free(xgi_fb_cache_block, block); - block = NULL; - } - } - XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap); - kfree(xgi_fb_heap); - xgi_fb_heap = NULL; - } - - if (xgi_fb_cache_block) - { - kmem_cache_destroy(xgi_fb_cache_block); - xgi_fb_cache_block = NULL; - } -} - -static xgi_mem_block_t * xgi_mem_new_node(void) -{ - xgi_mem_block_t *block; - - block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); - if (!block) - { - XGI_ERROR("kmem_cache_alloc failed\n"); - return NULL; - } - - return block; -} - -#if 0 -static void xgi_mem_insert_node_after(xgi_mem_list_t *list, - xgi_mem_block_t *current, - xgi_mem_block_t *block); -static void xgi_mem_insert_node_before(xgi_mem_list_t *list, - xgi_mem_block_t *current, - xgi_mem_block_t *block); -static void xgi_mem_insert_node_head(xgi_mem_list_t *list, - xgi_mem_block_t *block); -static void xgi_mem_insert_node_tail(xgi_mem_list_t *list, - xgi_mem_block_t *block); -static void xgi_mem_delete_node(xgi_mem_list_t *list, - xgi_mem_block_t *block); -/* - * insert node:block after node:current - */ -static void xgi_mem_insert_node_after(xgi_mem_list_t *list, - xgi_mem_block_t *current, - xgi_mem_block_t *block) -{ - block->prev = current; - block->next = current->next; - current->next = block; - - if (current == list->tail) - { - list->tail = block; - } - else - { - block->next->prev = block; - } -} - -/* - * insert node:block before node:current - */ -static void xgi_mem_insert_node_before(xgi_mem_list_t *list, - xgi_mem_block_t *current, - xgi_mem_block_t *block) -{ - block->prev = current->prev; - block->next = current; - current->prev = block; - if (current == list->head) - { - list->head = block; - } - else - { - block->prev->next = block; - } -} -void xgi_mem_insert_node_head(xgi_mem_list_t *list, - xgi_mem_block_t *block) -{ - block->next = list->head; - block->prev = NULL; - - if (NULL == list->head) - { - list->tail = block; - } - else - { - 
list->head->prev = block; - } - list->head = block; -} - -static void xgi_mem_insert_node_tail(xgi_mem_list_t *list, - xgi_mem_block_t *block) - -{ - block->next = NULL; - block->prev = list->tail; - if (NULL == list->tail) - { - list->head = block; - } - else - { - list->tail->next = block; - } - list->tail = block; -} - -static void xgi_mem_delete_node(xgi_mem_list_t *list, - xgi_mem_block_t *block) -{ - if (block == list->head) - { - list->head = block->next; - } - if (block == list->tail) - { - list->tail = block->prev; - } - - if (block->prev) - { - block->prev->next = block->next; - } - if (block->next) - { - block->next->prev = block->prev; - } - - block->next = block->prev = NULL; -} -#endif -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long originalSize) -{ - struct list_head *free_list; - xgi_mem_block_t *block, *free_block, *used_block; - - unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; - - XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); - - if (size == 0) - { - XGI_ERROR("size == 0\n"); - return (NULL); - } - XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); - if (size > xgi_fb_heap->max_freesize) - { - XGI_ERROR("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", - size, xgi_fb_heap->max_freesize); - return (NULL); - } - - free_list = xgi_fb_heap->free_list.next; - - while (free_list != &xgi_fb_heap->free_list) - { - XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_mem_block_s, list); - if (size <= block->size) - { - break; - } - free_list = free_list->next; - } - - if (free_list == &xgi_fb_heap->free_list) - { - XGI_ERROR("Can't allocate %ldk size from frame buffer memory !\n", size/1024); - return (NULL); - } - - free_block = block; - XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", - size, free_block->offset, free_block->size); - - if (size == free_block->size) - { - used_block = 
free_block; - XGI_INFO("size == free_block->size: free_block = 0x%p\n", free_block); - list_del(&free_block->list); - } - else - { - used_block = xgi_mem_new_node(); - - if (used_block == NULL) return (NULL); - - if (used_block == free_block) - { - XGI_ERROR("used_block == free_block = 0x%p\n", used_block); - } - - used_block->offset = free_block->offset; - used_block->size = size; - - free_block->offset += size; - free_block->size -= size; - } - - xgi_fb_heap->max_freesize -= size; - - list_add(&used_block->list, &xgi_fb_heap->used_list); - - return (used_block); -} - -static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset) -{ - struct list_head *free_list, *used_list; - xgi_mem_block_t *used_block = NULL, *block = NULL; - xgi_mem_block_t *prev, *next; - - unsigned long upper; - unsigned long lower; - - used_list = xgi_fb_heap->used_list.next; - while (used_list != &xgi_fb_heap->used_list) - { - block = list_entry(used_list, struct xgi_mem_block_s, list); - if (block->offset == offset) - { - break; - } - used_list = used_list->next; - } - - if (used_list == &xgi_fb_heap->used_list) - { - XGI_ERROR("can't find block: 0x%lx to free!\n", offset); - return (NULL); - } - - used_block = block; - XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - - xgi_fb_heap->max_freesize += used_block->size; - - prev = next = NULL; - upper = used_block->offset + used_block->size; - lower = used_block->offset; - - free_list = xgi_fb_heap->free_list.next; - while (free_list != &xgi_fb_heap->free_list) - { - block = list_entry(free_list, struct xgi_mem_block_s, list); - - if (block->offset == upper) - { - next = block; - } - else if ((block->offset + block->size) == lower) - { - prev = block; - } - free_list = free_list->next; - } - - XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); - list_del(&used_block->list); - - if (prev && next) - { - prev->size += (used_block->size + next->size); - 
list_del(&next->list); - XGI_INFO("free node 0x%p\n", next); - kmem_cache_free(xgi_fb_cache_block, next); - kmem_cache_free(xgi_fb_cache_block, used_block); - - next = NULL; - used_block = NULL; - return (prev); - } - - if (prev) - { - prev->size += used_block->size; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_fb_cache_block, used_block); - used_block = NULL; - return (prev); - } - - if (next) - { - next->size += used_block->size; - next->offset = used_block->offset; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_fb_cache_block, used_block); - used_block = NULL; - return (next); - } - - list_add(&used_block->list, &xgi_fb_heap->free_list); - XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - - return (used_block); -} - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_fb.h" + +#define XGI_FB_HEAP_START 0x1000000 + +static xgi_mem_heap_t *xgi_fb_heap; +static kmem_cache_t *xgi_fb_cache_block = NULL; +extern struct list_head xgi_mempid_list; + +static xgi_mem_block_t *xgi_mem_new_node(void); +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, unsigned long size); +static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset); + +void xgi_fb_alloc(xgi_info_t * info, + xgi_mem_req_t * req, xgi_mem_alloc_t * alloc) +{ + xgi_mem_block_t *block; + xgi_mem_pid_t *mempid_block; + + if (req->is_front) { + alloc->location = LOCAL; + alloc->bus_addr = info->fb.base; + alloc->hw_addr = 0; + XGI_INFO + ("Video RAM allocation on front buffer successfully! \n"); + } else { + xgi_down(info->fb_sem); + block = xgi_mem_alloc(info, req->size); + xgi_up(info->fb_sem); + + if (block == NULL) { + alloc->location = LOCAL; + alloc->size = 0; + alloc->bus_addr = 0; + alloc->hw_addr = 0; + XGI_ERROR("Video RAM allocation failed\n"); + } else { + XGI_INFO("Video RAM allocation succeeded: 0x%p\n", + (char *)block->offset); + alloc->location = LOCAL; + alloc->size = block->size; + alloc->bus_addr = info->fb.base + block->offset; + alloc->hw_addr = block->offset; + + /* manage mempid */ + mempid_block = + kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + mempid_block->location = LOCAL; + mempid_block->bus_addr = alloc->bus_addr; + mempid_block->pid = alloc->pid; + + if (!mempid_block) + XGI_ERROR("mempid_block alloc failed\n"); + + XGI_INFO + ("Memory ProcessID add one fb block pid:%ld successfully! 
\n", + mempid_block->pid); + list_add(&mempid_block->list, &xgi_mempid_list); + } + } +} + +void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr) +{ + xgi_mem_block_t *block; + unsigned long offset = bus_addr - info->fb.base; + xgi_mem_pid_t *mempid_block; + xgi_mem_pid_t *mempid_freeblock = NULL; + struct list_head *mempid_list; + + if (offset < 0) { + XGI_INFO("free onscreen frame buffer successfully !\n"); + } else { + xgi_down(info->fb_sem); + block = xgi_mem_free(info, offset); + xgi_up(info->fb_sem); + + if (block == NULL) { + XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", + offset); + } + + /* manage mempid */ + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) { + mempid_block = + list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == LOCAL + && mempid_block->bus_addr == bus_addr) { + mempid_freeblock = mempid_block; + break; + } + mempid_list = mempid_list->next; + } + if (mempid_freeblock) { + list_del(&mempid_freeblock->list); + XGI_INFO + ("Memory ProcessID delete one fb block pid:%ld successfully! 
\n", + mempid_freeblock->pid); + kfree(mempid_freeblock); + } + } +} + +int xgi_fb_heap_init(xgi_info_t * info) +{ + xgi_mem_block_t *block; + + xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL); + if (!xgi_fb_heap) { + XGI_ERROR("xgi_fb_heap alloc failed\n"); + return 0; + } + + INIT_LIST_HEAD(&xgi_fb_heap->free_list); + INIT_LIST_HEAD(&xgi_fb_heap->used_list); + INIT_LIST_HEAD(&xgi_fb_heap->sort_list); + + xgi_fb_cache_block = + kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + + if (NULL == xgi_fb_cache_block) { + XGI_ERROR("Fail to creat xgi_fb_block\n"); + goto fail1; + } + + block = + (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, + GFP_KERNEL); + if (!block) { + XGI_ERROR("kmem_cache_alloc failed\n"); + goto fail2; + } + + block->offset = XGI_FB_HEAP_START; + block->size = info->fb.size - XGI_FB_HEAP_START; + + list_add(&block->list, &xgi_fb_heap->free_list); + + xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START; + + XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, + block->size); + XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", + xgi_fb_heap->max_freesize); + + return 1; + + fail2: + if (xgi_fb_cache_block) { + kmem_cache_destroy(xgi_fb_cache_block); + xgi_fb_cache_block = NULL; + } + fail1: + if (xgi_fb_heap) { + kfree(xgi_fb_heap); + xgi_fb_heap = NULL; + } + return 0; +} + +void xgi_fb_heap_cleanup(xgi_info_t * info) +{ + struct list_head *free_list, *temp; + xgi_mem_block_t *block; + int i; + + if (xgi_fb_heap) { + free_list = &xgi_fb_heap->free_list; + for (i = 0; i < 3; i++, free_list++) { + temp = free_list->next; + while (temp != free_list) { + block = + list_entry(temp, struct xgi_mem_block_s, + list); + temp = temp->next; + + XGI_INFO + ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + i, block->offset, block->size); + //XGI_INFO("No. 
%d free block: 0x%p \n", i, block); + kmem_cache_free(xgi_fb_cache_block, block); + block = NULL; + } + } + XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap); + kfree(xgi_fb_heap); + xgi_fb_heap = NULL; + } + + if (xgi_fb_cache_block) { + kmem_cache_destroy(xgi_fb_cache_block); + xgi_fb_cache_block = NULL; + } +} + +static xgi_mem_block_t *xgi_mem_new_node(void) +{ + xgi_mem_block_t *block; + + block = + (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, + GFP_KERNEL); + if (!block) { + XGI_ERROR("kmem_cache_alloc failed\n"); + return NULL; + } + + return block; +} + +#if 0 +static void xgi_mem_insert_node_after(xgi_mem_list_t * list, + xgi_mem_block_t * current, + xgi_mem_block_t * block); +static void xgi_mem_insert_node_before(xgi_mem_list_t * list, + xgi_mem_block_t * current, + xgi_mem_block_t * block); +static void xgi_mem_insert_node_head(xgi_mem_list_t * list, + xgi_mem_block_t * block); +static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, + xgi_mem_block_t * block); +static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block); +/* + * insert node:block after node:current + */ +static void xgi_mem_insert_node_after(xgi_mem_list_t * list, + xgi_mem_block_t * current, + xgi_mem_block_t * block) +{ + block->prev = current; + block->next = current->next; + current->next = block; + + if (current == list->tail) { + list->tail = block; + } else { + block->next->prev = block; + } +} + +/* + * insert node:block before node:current + */ +static void xgi_mem_insert_node_before(xgi_mem_list_t * list, + xgi_mem_block_t * current, + xgi_mem_block_t * block) +{ + block->prev = current->prev; + block->next = current; + current->prev = block; + if (current == list->head) { + list->head = block; + } else { + block->prev->next = block; + } +} +void xgi_mem_insert_node_head(xgi_mem_list_t * list, xgi_mem_block_t * block) +{ + block->next = list->head; + block->prev = NULL; + + if (NULL == list->head) { + list->tail = block; + } else { + 
list->head->prev = block; + } + list->head = block; +} + +static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, + xgi_mem_block_t * block) +{ + block->next = NULL; + block->prev = list->tail; + if (NULL == list->tail) { + list->head = block; + } else { + list->tail->next = block; + } + list->tail = block; +} + +static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block) +{ + if (block == list->head) { + list->head = block->next; + } + if (block == list->tail) { + list->tail = block->prev; + } + + if (block->prev) { + block->prev->next = block->next; + } + if (block->next) { + block->next->prev = block->prev; + } + + block->next = block->prev = NULL; +} +#endif +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, + unsigned long originalSize) +{ + struct list_head *free_list; + xgi_mem_block_t *block, *free_block, *used_block; + + unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; + + XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", + originalSize, size); + + if (size == 0) { + XGI_ERROR("size == 0\n"); + return (NULL); + } + XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); + if (size > xgi_fb_heap->max_freesize) { + XGI_ERROR + ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", + size, xgi_fb_heap->max_freesize); + return (NULL); + } + + free_list = xgi_fb_heap->free_list.next; + + while (free_list != &xgi_fb_heap->free_list) { + XGI_INFO("free_list: 0x%px \n", free_list); + block = list_entry(free_list, struct xgi_mem_block_s, list); + if (size <= block->size) { + break; + } + free_list = free_list->next; + } + + if (free_list == &xgi_fb_heap->free_list) { + XGI_ERROR + ("Can't allocate %ldk size from frame buffer memory !\n", + size / 1024); + return (NULL); + } + + free_block = block; + XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + size, free_block->offset, free_block->size); + + if (size == free_block->size) { + used_block = free_block; + 
XGI_INFO("size == free_block->size: free_block = 0x%p\n", + free_block); + list_del(&free_block->list); + } else { + used_block = xgi_mem_new_node(); + + if (used_block == NULL) + return (NULL); + + if (used_block == free_block) { + XGI_ERROR("used_block == free_block = 0x%p\n", + used_block); + } + + used_block->offset = free_block->offset; + used_block->size = size; + + free_block->offset += size; + free_block->size -= size; + } + + xgi_fb_heap->max_freesize -= size; + + list_add(&used_block->list, &xgi_fb_heap->used_list); + + return (used_block); +} + +static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset) +{ + struct list_head *free_list, *used_list; + xgi_mem_block_t *used_block = NULL, *block = NULL; + xgi_mem_block_t *prev, *next; + + unsigned long upper; + unsigned long lower; + + used_list = xgi_fb_heap->used_list.next; + while (used_list != &xgi_fb_heap->used_list) { + block = list_entry(used_list, struct xgi_mem_block_s, list); + if (block->offset == offset) { + break; + } + used_list = used_list->next; + } + + if (used_list == &xgi_fb_heap->used_list) { + XGI_ERROR("can't find block: 0x%lx to free!\n", offset); + return (NULL); + } + + used_block = block; + XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + + xgi_fb_heap->max_freesize += used_block->size; + + prev = next = NULL; + upper = used_block->offset + used_block->size; + lower = used_block->offset; + + free_list = xgi_fb_heap->free_list.next; + while (free_list != &xgi_fb_heap->free_list) { + block = list_entry(free_list, struct xgi_mem_block_s, list); + + if (block->offset == upper) { + next = block; + } else if ((block->offset + block->size) == lower) { + prev = block; + } + free_list = free_list->next; + } + + XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); + list_del(&used_block->list); + + if (prev && next) { + prev->size += (used_block->size + next->size); + list_del(&next->list); + XGI_INFO("free 
node 0x%p\n", next); + kmem_cache_free(xgi_fb_cache_block, next); + kmem_cache_free(xgi_fb_cache_block, used_block); + + next = NULL; + used_block = NULL; + return (prev); + } + + if (prev) { + prev->size += used_block->size; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_fb_cache_block, used_block); + used_block = NULL; + return (prev); + } + + if (next) { + next->size += used_block->size; + next->offset = used_block->offset; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_fb_cache_block, used_block); + used_block = NULL; + return (next); + } + + list_add(&used_block->list, &xgi_fb_heap->free_list); + XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + + return (used_block); +} diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h index 4b7ec2f2..ae078ae0 100644 --- a/linux-core/xgi_fb.h +++ b/linux-core/xgi_fb.h @@ -1,71 +1,70 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_FB_H_ -#define _XGI_FB_H_ - -typedef struct xgi_mem_block_s { - struct list_head list; - unsigned long offset; - unsigned long size; - atomic_t use_count; -} xgi_mem_block_t; - -typedef struct xgi_mem_heap_s { - struct list_head free_list; - struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; - spinlock_t lock; -} xgi_mem_heap_t; - -#if 0 -typedef struct xgi_mem_block_s { - struct xgi_mem_block_s *next; - struct xgi_mem_block_s *prev; - unsigned long offset; - unsigned long size; - atomic_t use_count; -} xgi_mem_block_t; - -typedef struct xgi_mem_list_s { - xgi_mem_block_t *head; - xgi_mem_block_t *tail; -} xgi_mem_list_t; - -typedef struct xgi_mem_heap_s { - xgi_mem_list_t *free_list; - xgi_mem_list_t *used_list; - xgi_mem_list_t *sort_list; - unsigned long max_freesize; - spinlock_t lock; -} xgi_mem_heap_t; -#endif - -#endif - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#ifndef _XGI_FB_H_ +#define _XGI_FB_H_ + +typedef struct xgi_mem_block_s { + struct list_head list; + unsigned long offset; + unsigned long size; + atomic_t use_count; +} xgi_mem_block_t; + +typedef struct xgi_mem_heap_s { + struct list_head free_list; + struct list_head used_list; + struct list_head sort_list; + unsigned long max_freesize; + spinlock_t lock; +} xgi_mem_heap_t; + +#if 0 +typedef struct xgi_mem_block_s { + struct xgi_mem_block_s *next; + struct xgi_mem_block_s *prev; + unsigned long offset; + unsigned long size; + atomic_t use_count; +} xgi_mem_block_t; + +typedef struct xgi_mem_list_s { + xgi_mem_block_t *head; + xgi_mem_block_t *tail; +} xgi_mem_list_t; + +typedef struct xgi_mem_heap_s { + xgi_mem_list_t *free_list; + xgi_mem_list_t *used_list; + xgi_mem_list_t *sort_list; + unsigned long max_freesize; + spinlock_t lock; +} xgi_mem_heap_t; +#endif + +#endif diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index f207a4f6..67c1af82 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -1,596 +1,591 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. 
- * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - - -#ifndef _XGI_LINUX_H_ -#define _XGI_LINUX_H_ - -#include - -#ifndef LINUX_VERSION_CODE -#include -#endif - -#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */ -#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) -# error "This driver does not support pre-2.4 kernels!" -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) -#define KERNEL_2_4 -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) -# error "This driver does not support 2.5 kernels!" -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0) -#define KERNEL_2_6 -#else -# error "This driver does not support development kernels!" 
-#endif - -#if defined (CONFIG_SMP) && !defined (__SMP__) -#define __SMP__ -#endif - -#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) -#define MODVERSIONS -#endif - -#if defined (MODVERSIONS) && !defined (KERNEL_2_6) -#include -#endif - -#include /* printk */ -#include - -#include /* module_init, module_exit */ -#include /* pic_t, size_t, __u32, etc */ -#include /* error codes */ -#include /* circular linked list */ -#include /* NULL, offsetof */ -#include /* wait queues */ - -#include /* kmalloc, kfree, etc */ -#include /* vmalloc, vfree, etc */ - -#include /* poll_wait */ -#include /* mdelay, udelay */ -#include /* rdtsc rdtscl */ - -#include /* suser(), capable() replacement - for_each_task, for_each_process */ -#ifdef for_each_process -#define XGI_SCAN_PROCESS(p) for_each_process(p) -#else -#define XGI_SCAN_PROCESS(p) for_each_task(p) -#endif - -#ifdef KERNEL_2_6 -#include /* module_param() */ -#include /* kernel_locked */ -#include /* flush_tlb(), flush_tlb_all() */ -#include /* page table entry lookup */ -#endif - -#include /* pci_find_class, etc */ -#include /* tasklets, interrupt helpers */ -#include - -#include /* cli, sli, save_flags */ -#include /* ioremap, virt_to_phys */ -#include /* access_ok */ -#include /* PAGE_OFFSET */ -#include /* pte bit definitions */ - -#include -#include -#include - -#ifdef CONFIG_PROC_FS -#include -#endif - -#ifdef CONFIG_DEVFS_FS -#include -#endif - -#ifdef CONFIG_KMOD -#include -#endif - -#ifdef CONFIG_PM -#include -#endif - -#ifdef CONFIG_MTRR -#include -#endif - -#ifdef CONFIG_KDB -#include -#include -#endif - -#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) -#define AGPGART -#include -#include -#endif - -#ifndef MAX_ORDER -#ifdef KERNEL_2_4 -#define MAX_ORDER 10 -#endif -#ifdef KERNEL_2_6 -#define MAX_ORDER 11 -#endif -#endif - -#ifndef module_init -#define module_init(x) int init_module(void) { return x(); } -#define module_exit(x) void cleanup_module(void) { x(); } -#endif - -#ifndef minor -#define 
minor(x) MINOR(x) -#endif - -#ifndef IRQ_HANDLED -typedef void irqreturn_t; -#define IRQ_NONE -#define IRQ_HANDLED -#define IRQ_RETVAL(x) -#endif - -#if !defined (list_for_each) -#define list_for_each(pos, head) \ - for (pos = (head)->next, prefetch(pos->next); pos != (head); \ - pos = pos->next, prefetch(pos->next)) -#endif - -#ifdef KERNEL_2_4 -#define XGI_PCI_FOR_EACH_DEV(dev) pci_for_each_dev(dev) -#endif -#ifdef KERNEL_2_6 -extern struct list_head pci_devices; /* list of all devices */ -#define XGI_PCI_FOR_EACH_DEV(dev) \ - for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) -#endif - -/* - * the following macro causes problems when used in the same module - * as module_param(); undef it so we don't accidentally mix the two - */ -#if defined (KERNEL_2_6) -#undef MODULE_PARM -#endif - -#ifdef EXPORT_NO_SYMBOLS -EXPORT_NO_SYMBOLS; -#endif - -#if defined (KERNEL_2_4) -#define XGI_IS_SUSER() suser() -#define XGI_PCI_DEVICE_NAME(dev) ((dev)->name) -#define XGI_NUM_CPUS() smp_num_cpus -#define XGI_CLI() __cli() -#define XGI_SAVE_FLAGS(eflags) __save_flags(eflags) -#define XGI_RESTORE_FLAGS(eflags) __restore_flags(eflags) -#define XGI_MAY_SLEEP() (!in_interrupt()) -#define XGI_MODULE_PARAMETER(x) MODULE_PARM(x, "i") -#endif - -#if defined (KERNEL_2_6) -#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) -#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) -#define XGI_NUM_CPUS() num_online_cpus() -#define XGI_CLI() local_irq_disable() -#define XGI_SAVE_FLAGS(eflags) local_save_flags(eflags) -#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) -#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) -#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) -#endif - -/* Earlier 2.4.x kernels don't have pci_disable_device() */ -#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT -#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) -#else -#define XGI_PCI_DISABLE_DEVICE(dev) -#endif - -/* common defines */ 
-#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) -#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym) - -#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) -#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT) -#define XGI_VMA_PRIVATE(vma) ((vma)->vm_private_data) - -#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev) -#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255) - -#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start) -#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1) - -#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number -#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) - -#ifdef XGI_PCI_GET_CLASS_PRESENT -#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) -#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) -#define XGI_PCI_GET_SLOT(bus,devfn) pci_get_slot(pci_find_bus(0,bus),devfn) -#define XGI_PCI_GET_CLASS(class,from) pci_get_class(class,from) -#else -#define XGI_PCI_DEV_PUT(dev) -#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) -#define XGI_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn) -#define XGI_PCI_GET_CLASS(class,from) pci_find_class(class,from) -#endif - -/* - * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver - * model is not sufficient for full acpi support. it may work in some cases, - * but not enough for us to officially support this configuration. 
- */ -#if defined(CONFIG_ACPI) && defined(KERNEL_2_6) -#define XGI_PM_SUPPORT_ACPI -#endif - -#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) -#define XGI_PM_SUPPORT_APM -#endif - - -#if defined(CONFIG_DEVFS_FS) -#if defined(KERNEL_2_6) -typedef void* devfs_handle_t; -#define XGI_DEVFS_REGISTER(_name, _minor) \ - ({ \ - devfs_handle_t __handle = NULL; \ - if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \ - S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \ - { \ - __handle = (void *) 1; /* XXX Fix me! (boolean) */ \ - } \ - __handle; \ - }) -/* -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i) -*/ -#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") -#else // defined(KERNEL_2_4) -#define XGI_DEVFS_REGISTER(_name, _minor) \ - ({ \ - devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \ - XGI_DEV_MAJOR, _minor, \ - S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \ - __handle; \ - }) - -#define XGI_DEVFS_REMOVE_DEVICE(i) \ - ({ \ - if (xgi_devfs_handles[i] != NULL) \ - { \ - devfs_unregister(xgi_devfs_handles[i]); \ - } \ - }) -#define XGI_DEVFS_REMOVE_CONTROL() \ - ({ \ - if (xgi_devfs_handles[0] != NULL) \ - { \ - devfs_unregister(xgi_devfs_handles[0]); \ - } \ - }) -#endif /* defined(KERNEL_2_4) */ -#endif /* defined(CONFIG_DEVFS_FS) */ - -#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6) -#define XGI_REGISTER_CHRDEV(x...) devfs_register_chrdev(x) -#define XGI_UNREGISTER_CHRDEV(x...) devfs_unregister_chrdev(x) -#else -#define XGI_REGISTER_CHRDEV(x...) register_chrdev(x) -#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) -#endif - -#if defined(XGI_REMAP_PFN_RANGE_PRESENT) -#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ - remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x) -#elif defined(XGI_REMAP_PAGE_RANGE_5) -#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) -#elif defined(XGI_REMAP_PAGE_RANGE_4) -#define XGI_REMAP_PAGE_RANGE(x...) 
remap_page_range(x) -#else -#warning "xgi_configure.sh failed, assuming remap_page_range(5)!" -#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) -#endif - -#if defined(pmd_offset_map) -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ - { \ - pg_mid_dir = pmd_offset_map(pg_dir, address); \ - } -#define XGI_PMD_UNMAP(pg_mid_dir) \ - { \ - pmd_unmap(pg_mid_dir); \ - } -#else -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ - { \ - pg_mid_dir = pmd_offset(pg_dir, address); \ - } -#define XGI_PMD_UNMAP(pg_mid_dir) -#endif - -#define XGI_PMD_PRESENT(pg_mid_dir) \ - ({ \ - if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \ - { \ - XGI_PMD_UNMAP(pg_mid_dir); \ - pg_mid_dir = NULL; \ - } \ - pg_mid_dir != NULL; \ - }) - -#if defined(pte_offset_atomic) -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset_atomic(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) \ - { \ - pte_kunmap(pte); \ - } -#elif defined(pte_offset) -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) -#else -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset_map(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) \ - { \ - pte_unmap(pte); \ - } -#endif - -#define XGI_PTE_PRESENT(pte) \ - ({ \ - if (pte) \ - { \ - if (!pte_present(*pte)) \ - { \ - XGI_PTE_UNMAP(pte); pte = NULL; \ - } \ - } \ - pte != NULL; \ - }) - -#define XGI_PTE_VALUE(pte) \ - ({ \ - unsigned long __pte_value = pte_val(*pte); \ - XGI_PTE_UNMAP(pte); \ - __pte_value; \ - }) - -#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE) -#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1)) - -#if !defined (pgprot_noncached) -static inline pgprot_t pgprot_noncached(pgprot_t old_prot) - { - pgprot_t new_prot = old_prot; - if (boot_cpu_data.x86 > 3) - new_prot = 
__pgprot(pgprot_val(old_prot) | _PAGE_PCD); - return new_prot; - } -#endif - -#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined) -/* Added define for write combining page, only valid if pat enabled. */ -#define _PAGE_WRTCOMB _PAGE_PWT -#define __PAGE_KERNEL_WRTCOMB \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED) -#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB) - -static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) - { - pgprot_t new_prot = old_prot; - if (boot_cpu_data.x86 > 3) - { - pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT); - new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB); - } - return new_prot; - } -#endif - -#if !defined(page_to_pfn) -#define page_to_pfn(page) ((page) - mem_map) -#endif - -#define XGI_VMALLOC(ptr, size) \ - { \ - (ptr) = vmalloc_32(size); \ - } - -#define XGI_VFREE(ptr, size) \ - { \ - vfree((void *) (ptr)); \ - } - -#define XGI_IOREMAP(ptr, physaddr, size) \ - { \ - (ptr) = ioremap(physaddr, size); \ - } - -#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \ - { \ - (ptr) = ioremap_nocache(physaddr, size); \ - } - -#define XGI_IOUNMAP(ptr, size) \ - { \ - iounmap(ptr); \ - } - -/* - * only use this because GFP_KERNEL may sleep.. - * GFP_ATOMIC is ok, it won't sleep - */ -#define XGI_KMALLOC(ptr, size) \ - { \ - (ptr) = kmalloc(size, GFP_KERNEL); \ - } - -#define XGI_KMALLOC_ATOMIC(ptr, size) \ - { \ - (ptr) = kmalloc(size, GFP_ATOMIC); \ - } - -#define XGI_KFREE(ptr, size) \ - { \ - kfree((void *) (ptr)); \ - } - -#define XGI_GET_FREE_PAGES(ptr, order) \ - { \ - (ptr) = __get_free_pages(GFP_KERNEL, order); \ - } - -#define XGI_FREE_PAGES(ptr, order) \ - { \ - free_pages(ptr, order); \ - } - -typedef struct xgi_pte_s { - unsigned long phys_addr; - unsigned long virt_addr; -} xgi_pte_t; - -/* - * AMD Athlon processors expose a subtle bug in the Linux - * kernel, that may lead to AGP memory corruption. 
Recent - * kernel versions had a workaround for this problem, but - * 2.4.20 is the first kernel to address it properly. The - * page_attr API provides the means to solve the problem. - */ -#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT) -static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t *page_ptr) - { - struct page *page = virt_to_page(__va(page_ptr->phys_addr)); - change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); - } -static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t *page_ptr) - { - struct page *page = virt_to_page(__va(page_ptr->phys_addr)); - change_page_attr(page, 1, PAGE_KERNEL); - } -#else -#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list) -#define XGI_SET_PAGE_ATTRIB_CACHED(page_list) -#endif - -#ifdef KERNEL_2_4 -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) - -#define XGILockPage(page) set_bit(PG_locked, &(page)->flags) -#define XGIUnlockPage(page) clear_bit(PG_locked, &(page)->flags) -#endif - -#ifdef KERNEL_2_6 -/* add for SUSE 9, Jill*/ -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) -#else -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v) -#endif -#define XGILockPage(page) SetPageLocked(page) -#define XGIUnlockPage(page) ClearPageLocked(page) -#endif - - -/* - * hide a pointer to struct xgi_info_t in a file-private info - */ - -typedef struct -{ - void *info; - U32 num_events; - spinlock_t fp_lock; - 
wait_queue_head_t wait_queue; -} xgi_file_private_t; - -#define FILE_PRIVATE(filp) ((filp)->private_data) - -#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp)) - -/* for the card devices */ -#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) - -#ifdef KERNEL_2_0 -#define INODE_FROM_FP(filp) ((filp)->f_inode) -#else -#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) -#endif - -#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) -#define XGI_ATOMIC_INC(data) atomic_inc(&(data)) -#define XGI_ATOMIC_DEC(data) atomic_dec(&(data)) -#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) -#define XGI_ATOMIC_READ(data) atomic_read(&(data)) - -/* - * lock-related functions that should only be called from this file - */ -#define xgi_init_lock(lock) spin_lock_init(&lock) -#define xgi_lock(lock) spin_lock(&lock) -#define xgi_unlock(lock) spin_unlock(&lock) -#define xgi_down(lock) down(&lock) -#define xgi_up(lock) up(&lock) - -#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags) -#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags) - -#endif + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_LINUX_H_ +#define _XGI_LINUX_H_ + +#include + +#ifndef LINUX_VERSION_CODE +#include +#endif + +#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */ +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) +# error "This driver does not support pre-2.4 kernels!" +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) +#define KERNEL_2_4 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) +# error "This driver does not support 2.5 kernels!" +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0) +#define KERNEL_2_6 +#else +# error "This driver does not support development kernels!" 
+#endif + +#if defined (CONFIG_SMP) && !defined (__SMP__) +#define __SMP__ +#endif + +#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) +#define MODVERSIONS +#endif + +#if defined (MODVERSIONS) && !defined (KERNEL_2_6) +#include +#endif + +#include /* printk */ +#include + +#include /* module_init, module_exit */ +#include /* pic_t, size_t, __u32, etc */ +#include /* error codes */ +#include /* circular linked list */ +#include /* NULL, offsetof */ +#include /* wait queues */ + +#include /* kmalloc, kfree, etc */ +#include /* vmalloc, vfree, etc */ + +#include /* poll_wait */ +#include /* mdelay, udelay */ +#include /* rdtsc rdtscl */ + +#include /* suser(), capable() replacement + for_each_task, for_each_process */ +#ifdef for_each_process +#define XGI_SCAN_PROCESS(p) for_each_process(p) +#else +#define XGI_SCAN_PROCESS(p) for_each_task(p) +#endif + +#ifdef KERNEL_2_6 +#include /* module_param() */ +#include /* kernel_locked */ +#include /* flush_tlb(), flush_tlb_all() */ +#include /* page table entry lookup */ +#endif + +#include /* pci_find_class, etc */ +#include /* tasklets, interrupt helpers */ +#include + +#include /* cli, sli, save_flags */ +#include /* ioremap, virt_to_phys */ +#include /* access_ok */ +#include /* PAGE_OFFSET */ +#include /* pte bit definitions */ + +#include +#include +#include + +#ifdef CONFIG_PROC_FS +#include +#endif + +#ifdef CONFIG_DEVFS_FS +#include +#endif + +#ifdef CONFIG_KMOD +#include +#endif + +#ifdef CONFIG_PM +#include +#endif + +#ifdef CONFIG_MTRR +#include +#endif + +#ifdef CONFIG_KDB +#include +#include +#endif + +#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) +#define AGPGART +#include +#include +#endif + +#ifndef MAX_ORDER +#ifdef KERNEL_2_4 +#define MAX_ORDER 10 +#endif +#ifdef KERNEL_2_6 +#define MAX_ORDER 11 +#endif +#endif + +#ifndef module_init +#define module_init(x) int init_module(void) { return x(); } +#define module_exit(x) void cleanup_module(void) { x(); } +#endif + +#ifndef minor +#define 
minor(x) MINOR(x) +#endif + +#ifndef IRQ_HANDLED +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif + +#if !defined (list_for_each) +#define list_for_each(pos, head) \ + for (pos = (head)->next, prefetch(pos->next); pos != (head); \ + pos = pos->next, prefetch(pos->next)) +#endif + +#ifdef KERNEL_2_4 +#define XGI_PCI_FOR_EACH_DEV(dev) pci_for_each_dev(dev) +#endif +#ifdef KERNEL_2_6 +extern struct list_head pci_devices; /* list of all devices */ +#define XGI_PCI_FOR_EACH_DEV(dev) \ + for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) +#endif + +/* + * the following macro causes problems when used in the same module + * as module_param(); undef it so we don't accidentally mix the two + */ +#if defined (KERNEL_2_6) +#undef MODULE_PARM +#endif + +#ifdef EXPORT_NO_SYMBOLS +EXPORT_NO_SYMBOLS; +#endif + +#if defined (KERNEL_2_4) +#define XGI_IS_SUSER() suser() +#define XGI_PCI_DEVICE_NAME(dev) ((dev)->name) +#define XGI_NUM_CPUS() smp_num_cpus +#define XGI_CLI() __cli() +#define XGI_SAVE_FLAGS(eflags) __save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags) __restore_flags(eflags) +#define XGI_MAY_SLEEP() (!in_interrupt()) +#define XGI_MODULE_PARAMETER(x) MODULE_PARM(x, "i") +#endif + +#if defined (KERNEL_2_6) +#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) +#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) +#define XGI_NUM_CPUS() num_online_cpus() +#define XGI_CLI() local_irq_disable() +#define XGI_SAVE_FLAGS(eflags) local_save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) +#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) +#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) +#endif + +/* Earlier 2.4.x kernels don't have pci_disable_device() */ +#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT +#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) +#else +#define XGI_PCI_DISABLE_DEVICE(dev) +#endif + +/* common defines */ 
+#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) +#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym) + +#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) +#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT) +#define XGI_VMA_PRIVATE(vma) ((vma)->vm_private_data) + +#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev) +#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255) + +#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start) +#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1) + +#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number +#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) + +#ifdef XGI_PCI_GET_CLASS_PRESENT +#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn) pci_get_slot(pci_find_bus(0,bus),devfn) +#define XGI_PCI_GET_CLASS(class,from) pci_get_class(class,from) +#else +#define XGI_PCI_DEV_PUT(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn) +#define XGI_PCI_GET_CLASS(class,from) pci_find_class(class,from) +#endif + +/* + * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver + * model is not sufficient for full acpi support. it may work in some cases, + * but not enough for us to officially support this configuration. 
+ */ +#if defined(CONFIG_ACPI) && defined(KERNEL_2_6) +#define XGI_PM_SUPPORT_ACPI +#endif + +#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) +#define XGI_PM_SUPPORT_APM +#endif + +#if defined(CONFIG_DEVFS_FS) +#if defined(KERNEL_2_6) +typedef void *devfs_handle_t; +#define XGI_DEVFS_REGISTER(_name, _minor) \ + ({ \ + devfs_handle_t __handle = NULL; \ + if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \ + S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \ + { \ + __handle = (void *) 1; /* XXX Fix me! (boolean) */ \ + } \ + __handle; \ + }) +/* +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i) +*/ +#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") +#else // defined(KERNEL_2_4) +#define XGI_DEVFS_REGISTER(_name, _minor) \ + ({ \ + devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \ + XGI_DEV_MAJOR, _minor, \ + S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \ + __handle; \ + }) + +#define XGI_DEVFS_REMOVE_DEVICE(i) \ + ({ \ + if (xgi_devfs_handles[i] != NULL) \ + { \ + devfs_unregister(xgi_devfs_handles[i]); \ + } \ + }) +#define XGI_DEVFS_REMOVE_CONTROL() \ + ({ \ + if (xgi_devfs_handles[0] != NULL) \ + { \ + devfs_unregister(xgi_devfs_handles[0]); \ + } \ + }) +#endif /* defined(KERNEL_2_4) */ +#endif /* defined(CONFIG_DEVFS_FS) */ + +#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6) +#define XGI_REGISTER_CHRDEV(x...) devfs_register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...) devfs_unregister_chrdev(x) +#else +#define XGI_REGISTER_CHRDEV(x...) register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) +#endif + +#if defined(XGI_REMAP_PFN_RANGE_PRESENT) +#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ + remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x) +#elif defined(XGI_REMAP_PAGE_RANGE_5) +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) +#elif defined(XGI_REMAP_PAGE_RANGE_4) +#define XGI_REMAP_PAGE_RANGE(x...) 
remap_page_range(x) +#else +#warning "xgi_configure.sh failed, assuming remap_page_range(5)!" +#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) +#endif + +#if defined(pmd_offset_map) +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ + { \ + pg_mid_dir = pmd_offset_map(pg_dir, address); \ + } +#define XGI_PMD_UNMAP(pg_mid_dir) \ + { \ + pmd_unmap(pg_mid_dir); \ + } +#else +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ + { \ + pg_mid_dir = pmd_offset(pg_dir, address); \ + } +#define XGI_PMD_UNMAP(pg_mid_dir) +#endif + +#define XGI_PMD_PRESENT(pg_mid_dir) \ + ({ \ + if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \ + { \ + XGI_PMD_UNMAP(pg_mid_dir); \ + pg_mid_dir = NULL; \ + } \ + pg_mid_dir != NULL; \ + }) + +#if defined(pte_offset_atomic) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset_atomic(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) \ + { \ + pte_kunmap(pte); \ + } +#elif defined(pte_offset) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) +#else +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ + { \ + pte = pte_offset_map(pg_mid_dir, address); \ + XGI_PMD_UNMAP(pg_mid_dir); \ + } +#define XGI_PTE_UNMAP(pte) \ + { \ + pte_unmap(pte); \ + } +#endif + +#define XGI_PTE_PRESENT(pte) \ + ({ \ + if (pte) \ + { \ + if (!pte_present(*pte)) \ + { \ + XGI_PTE_UNMAP(pte); pte = NULL; \ + } \ + } \ + pte != NULL; \ + }) + +#define XGI_PTE_VALUE(pte) \ + ({ \ + unsigned long __pte_value = pte_val(*pte); \ + XGI_PTE_UNMAP(pte); \ + __pte_value; \ + }) + +#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE) +#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1)) + +#if !defined (pgprot_noncached) +static inline pgprot_t pgprot_noncached(pgprot_t old_prot) +{ + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) + new_prot = 
__pgprot(pgprot_val(old_prot) | _PAGE_PCD); + return new_prot; +} +#endif + +#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined) +/* Added define for write combining page, only valid if pat enabled. */ +#define _PAGE_WRTCOMB _PAGE_PWT +#define __PAGE_KERNEL_WRTCOMB \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED) +#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB) + +static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) +{ + pgprot_t new_prot = old_prot; + if (boot_cpu_data.x86 > 3) { + pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT); + new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB); + } + return new_prot; +} +#endif + +#if !defined(page_to_pfn) +#define page_to_pfn(page) ((page) - mem_map) +#endif + +#define XGI_VMALLOC(ptr, size) \ + { \ + (ptr) = vmalloc_32(size); \ + } + +#define XGI_VFREE(ptr, size) \ + { \ + vfree((void *) (ptr)); \ + } + +#define XGI_IOREMAP(ptr, physaddr, size) \ + { \ + (ptr) = ioremap(physaddr, size); \ + } + +#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \ + { \ + (ptr) = ioremap_nocache(physaddr, size); \ + } + +#define XGI_IOUNMAP(ptr, size) \ + { \ + iounmap(ptr); \ + } + +/* + * only use this because GFP_KERNEL may sleep.. + * GFP_ATOMIC is ok, it won't sleep + */ +#define XGI_KMALLOC(ptr, size) \ + { \ + (ptr) = kmalloc(size, GFP_KERNEL); \ + } + +#define XGI_KMALLOC_ATOMIC(ptr, size) \ + { \ + (ptr) = kmalloc(size, GFP_ATOMIC); \ + } + +#define XGI_KFREE(ptr, size) \ + { \ + kfree((void *) (ptr)); \ + } + +#define XGI_GET_FREE_PAGES(ptr, order) \ + { \ + (ptr) = __get_free_pages(GFP_KERNEL, order); \ + } + +#define XGI_FREE_PAGES(ptr, order) \ + { \ + free_pages(ptr, order); \ + } + +typedef struct xgi_pte_s { + unsigned long phys_addr; + unsigned long virt_addr; +} xgi_pte_t; + +/* + * AMD Athlon processors expose a subtle bug in the Linux + * kernel, that may lead to AGP memory corruption. 
Recent + * kernel versions had a workaround for this problem, but + * 2.4.20 is the first kernel to address it properly. The + * page_attr API provides the means to solve the problem. + */ +#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT) +static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t * page_ptr) +{ + struct page *page = virt_to_page(__va(page_ptr->phys_addr)); + change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); +} +static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) +{ + struct page *page = virt_to_page(__va(page_ptr->phys_addr)); + change_page_attr(page, 1, PAGE_KERNEL); +} +#else +#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list) +#define XGI_SET_PAGE_ATTRIB_CACHED(page_list) +#endif + +#ifdef KERNEL_2_4 +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) + +#define XGILockPage(page) set_bit(PG_locked, &(page)->flags) +#define XGIUnlockPage(page) clear_bit(PG_locked, &(page)->flags) +#endif + +#ifdef KERNEL_2_6 +/* add for SUSE 9, Jill*/ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) +#else +#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count) +#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count) +#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count) +#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v) +#endif +#define XGILockPage(page) SetPageLocked(page) +#define XGIUnlockPage(page) ClearPageLocked(page) +#endif + +/* + * hide a pointer to struct xgi_info_t in a file-private info + */ + +typedef struct { + void *info; + U32 num_events; + spinlock_t fp_lock; + 
wait_queue_head_t wait_queue; +} xgi_file_private_t; + +#define FILE_PRIVATE(filp) ((filp)->private_data) + +#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp)) + +/* for the card devices */ +#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) + +#ifdef KERNEL_2_0 +#define INODE_FROM_FP(filp) ((filp)->f_inode) +#else +#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) +#endif + +#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) +#define XGI_ATOMIC_INC(data) atomic_inc(&(data)) +#define XGI_ATOMIC_DEC(data) atomic_dec(&(data)) +#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) +#define XGI_ATOMIC_READ(data) atomic_read(&(data)) + +/* + * lock-related functions that should only be called from this file + */ +#define xgi_init_lock(lock) spin_lock_init(&lock) +#define xgi_lock(lock) spin_lock(&lock) +#define xgi_unlock(lock) spin_unlock(&lock) +#define xgi_down(lock) down(&lock) +#define xgi_up(lock) up(&lock) + +#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags) +#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags) + +#endif diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index b15c7ecf..61e40594 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -1,657 +1,630 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_regs.h" -#include "xgi_pcie.h" - -void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t *req) -{ - req->device_id = info->device_id; - req->device_name[0] = 'x'; - req->device_name[1] = 'g'; - req->device_name[2] = '4'; - req->device_name[3] = '7'; - req->vendor_id = info->vendor_id; - req->curr_display_mode = 0; - req->fb_size = info->fb.size; - req->sarea_bus_addr = info->sarea_info.bus_addr; - req->sarea_size = info->sarea_info.size; -} - -void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req) -{ - req->mmioBase = (void *)info->mmio.base; - req->size = info->mmio.size; -} - -void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req) -{ - info->scrn_info.scrn_start = req->scrn_start; - info->scrn_info.scrn_xres = req->scrn_xres; - info->scrn_info.scrn_yres = req->scrn_yres; - info->scrn_info.scrn_bpp = req->scrn_bpp; - info->scrn_info.scrn_pitch = req->scrn_pitch; - - XGI_INFO("info->scrn_info.scrn_start: 0x%lx" - "info->scrn_info.scrn_xres: 0x%lx" - "info->scrn_info.scrn_yres: 0x%lx" - "info->scrn_info.scrn_bpp: 0x%lx" - "info->scrn_info.scrn_pitch: 0x%lx\n", - info->scrn_info.scrn_start, - info->scrn_info.scrn_xres, - info->scrn_info.scrn_yres, - info->scrn_info.scrn_bpp, - info->scrn_info.scrn_pitch); -} - -void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req) -{ - req->scrn_start = info->scrn_info.scrn_start; - req->scrn_xres = info->scrn_info.scrn_xres; - req->scrn_yres = info->scrn_info.scrn_yres; - req->scrn_bpp = info->scrn_info.scrn_bpp; - req->scrn_pitch = info->scrn_info.scrn_pitch; - - XGI_INFO("req->scrn_start: 0x%lx" - "req->scrn_xres: 0x%lx" - "req->scrn_yres: 0x%lx" - "req->scrn_bpp: 0x%lx" - "req->scrn_pitch: 0x%lx\n", - req->scrn_start, - req->scrn_xres, - req->scrn_yres, - req->scrn_bpp, - req->scrn_pitch); -} - -void xgi_ge_reset(xgi_info_t *info) 
-{ - xgi_disable_ge(info); - xgi_enable_ge(info); -} - -void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req) -{ - info->sarea_info.bus_addr = req->bus_addr; - info->sarea_info.size = req->size; - XGI_INFO("info->sarea_info.bus_addr: 0x%lx" - "info->sarea_info.size: 0x%lx\n", - info->sarea_info.bus_addr, - info->sarea_info.size); -} - -/* - * irq functions - */ -#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff - -static U32 s_invalid_begin = 0; - -BOOL xgi_ge_irq_handler(xgi_info_t *info) -{ - volatile U8 *mmio_vbase = info->mmio.vbase; - volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); - U32 int_status = ge_3d_status[4]; // interrupt status - U32 auto_reset_count = 0; - BOOL is_support_auto_reset = FALSE; - - // Check GE on/off - if (0 == (0xffffc0f0 & int_status)) - { - U32 old_ge_status = ge_3d_status[0x00]; - U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; - if (0 != (0x1000 & int_status)) - { - // We got GE stall interrupt. - ge_3d_status[0x04] = int_status | 0x04000000; - - if (TRUE == is_support_auto_reset) - { - BOOL is_wrong_signal = FALSE; - static U32 last_int_tick_low, last_int_tick_high; - static U32 new_int_tick_low, new_int_tick_high; - static U32 continoue_int_count = 0; - // OE II is busy. 
- while (old_ge_status & 0x001c0000) - { - U16 check; - // Check Read back status - *(mmio_vbase + 0x235c) = 0x80; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if ((check & 0x3f) != ((check & 0x3f00) >> 8)) - { - is_wrong_signal = TRUE; - break; - } - // Check RO channel - *(mmio_vbase + 0x235c) = 0x83; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if ((check & 0x0f) != ((check & 0xf0) >> 4)) - { - is_wrong_signal = TRUE; - break; - } - // Check RW channel - *(mmio_vbase + 0x235c) = 0x88; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if ((check & 0x0f) != ((check & 0xf0) >> 4)) - { - is_wrong_signal = TRUE; - break; - } - // Check RO channel outstanding - *(mmio_vbase + 0x235c) = 0x8f; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if (0 != (check & 0x3ff)) - { - is_wrong_signal = TRUE; - break; - } - // Check RW channel outstanding - *(mmio_vbase + 0x235c) = 0x90; - check = *((volatile U16*)(mmio_vbase + 0x2360)); - if (0 != (check & 0x3ff)) - { - is_wrong_signal = TRUE; - break; - } - // No pending PCIE request. GE stall. - break; - } - - if (is_wrong_signal) - { - // Nothing but skip. - } - else if (0 == continoue_int_count++) - { - rdtsc(last_int_tick_low, last_int_tick_high); - } - else - { - rdtscl(new_int_tick_low); - if ((new_int_tick_low - last_int_tick_low) > STALL_INTERRUPT_RESET_THRESHOLD) - { - continoue_int_count = 0; - } - else if (continoue_int_count >= 3) - { - continoue_int_count = 0; - - // GE Hung up, need reset. - XGI_INFO("Reset GE!\n"); - - *(mmio_vbase + 0xb057) = 8; - int time_out = 0xffff; - while (0 != (ge_3d_status[0x00] & 0xf0000000)) - { - while (0 != ((--time_out) & 0xfff)); - if (0 == time_out) - { - XGI_INFO("Can not reset back 0x%lx!\n", ge_3d_status[0x00]); - *(mmio_vbase + 0xb057) = 0; - // Have to use 3x5.36 to reset. 
- // Save and close dynamic gating - U8 old_3ce = *(mmio_vbase + 0x3ce); - *(mmio_vbase + 0x3ce) = 0x2a; - U8 old_3cf = *(mmio_vbase + 0x3cf); - *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; - // Reset GE - U8 old_index = *(mmio_vbase + 0x3d4); - *(mmio_vbase + 0x3d4) = 0x36; - U8 old_36 = *(mmio_vbase + 0x3d5); - *(mmio_vbase + 0x3d5) = old_36 | 0x10; - while (0 != ((--time_out) & 0xfff)); - *(mmio_vbase + 0x3d5) = old_36; - *(mmio_vbase + 0x3d4) = old_index; - // Restore dynamic gating - *(mmio_vbase + 0x3cf) = old_3cf; - *(mmio_vbase + 0x3ce) = old_3ce; - break; - } - } - *(mmio_vbase + 0xb057) = 0; - - // Increase Reset counter - auto_reset_count++; - } - } - } - return TRUE; - } - else if (0 != (0x1 & int_status)) - { - s_invalid_begin++; - ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; - return TRUE; - } - } - return FALSE; -} - -BOOL xgi_crt_irq_handler(xgi_info_t *info) -{ - BOOL ret = FALSE; - U8 *mmio_vbase = info->mmio.vbase; - U32 device_status = 0; - U32 hw_status = 0; - U8 save_3ce = bReadReg(0x3ce); - - - if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened - { - U8 op3cf_3d; - U8 op3cf_37; - - // What happened? - op3cf_37 = bIn3cf(0x37); - -#if 0 - if (op3cf_37 & 0x04) - device_status |= GDEVST_CONNECT; - else - device_status &= ~GDEVST_CONNECT; - - device_status |= GDEVST_DEVICE_CHANGED; - hw_status |= HWST_DEVICE_CHANGED; -#endif - // Clear CRT interrupt - op3cf_3d = bIn3cf(0x3d); - bOut3cf(0x3d, (op3cf_3d | 0x04)); - bOut3cf(0x3d, (op3cf_3d & ~0x04)); - ret = TRUE; - } - bWriteReg(0x3ce, save_3ce); - - return (ret); -} - -BOOL xgi_dvi_irq_handler(xgi_info_t *info) -{ - BOOL ret = FALSE; - U8 *mmio_vbase = info->mmio.vbase; - U32 device_status = 0; - U32 hw_status = 0; - U8 save_3ce = bReadReg(0x3ce); - - if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened - { - U8 op3cf_39; - U8 op3cf_37; - U8 op3x5_5a; - U8 save_3x4 = bReadReg(0x3d4);; - - // What happened? 
- op3cf_37 = bIn3cf(0x37); -#if 0 - //Also update our internal flag - if (op3cf_37 & 0x10) // Second Monitor plugged In - { - device_status |= GDEVST_CONNECT; - //Because currenly we cannot determine if DVI digital - //or DVI analog is connected according to DVI interrupt - //We should still call BIOS to check it when utility ask us - device_status &= ~GDEVST_CHECKED; - } - else - { - device_status &= ~GDEVST_CONNECT; - } -#endif - //Notify BIOS that DVI plug/unplug happened - op3x5_5a = bIn3x5(0x5a); - bOut3x5(0x5a, op3x5_5a & 0xf7); - - bWriteReg(0x3d4, save_3x4); - - //device_status |= GDEVST_DEVICE_CHANGED; - //hw_status |= HWST_DEVICE_CHANGED; - - // Clear DVI interrupt - op3cf_39 = bIn3cf(0x39); - bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 - bOut3c5(0x39, (op3cf_39 | 0x01 )); //Set 3cf.39 bit 0 to 1 - - ret = TRUE; - } - bWriteReg(0x3ce, save_3ce); - - return (ret); -} - -void xgi_dump_register(xgi_info_t *info) -{ - int i, j; - unsigned char temp; - - // 0x3C5 - printk("\r\n=====xgi_dump_register========0x%x===============\r\n", 0x3C5); - - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bIn3c5(i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - // 0x3D5 - printk("\r\n====xgi_dump_register=========0x%x===============\r\n", 0x3D5); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bIn3x5(i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - // 0x3CF - printk("\r\n=========xgi_dump_register====0x%x===============\r\n", 0x3CF); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x 
", i); - - for(j=0; j<0x10; j++) - { - temp = bIn3cf(i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n=====xgi_dump_register======0x%x===============\r\n", 0xB000); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x5; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0xB000 + i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2200); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0xB; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0x2200 + i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2300); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x7; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0x2300 + i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2400); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0x2400 + i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2800); - for(i=0; i<0x10; i++) - { - if(i == 0) - { - printk("%5x", i); - } - else - { - printk("%3x", i); - } - } - printk("\r\n"); - - for(i=0; i<0x10; i++) - { - printk("%1x ", i); - - for(j=0; j<0x10; j++) - { - temp = bReadReg(0x2800 + i*0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } -} - -void xgi_restore_registers(xgi_info_t *info) 
-{ - bOut3x5(0x13, 0); - bOut3x5(0x8b, 2); -} - -void xgi_waitfor_pci_idle(xgi_info_t *info) -{ -#define WHOLD_GE_STATUS 0x2800 -#define IDLE_MASK ~0x90200000 - - int idleCount = 0; - while(idleCount < 5) - { - if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) - { - idleCount = 0; - } - else - { - idleCount ++; - } - } -} - -int xgi_get_cpu_id(struct cpu_info_s *arg) -{ - int op = arg->_eax; - __asm__("cpuid" - : "=a" (arg->_eax), - "=b" (arg->_ebx), - "=c" (arg->_ecx), - "=d" (arg->_edx) - : "0" (op)); - - XGI_INFO("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n", - op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx); -} - -/*memory collect function*/ -extern struct list_head xgi_mempid_list; -void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt) -{ - xgi_mem_pid_t *mempid_block; - struct list_head *mempid_list; - struct task_struct *p,*find; - unsigned int cnt = 0; - - mempid_list = xgi_mempid_list.next; - - while (mempid_list != &xgi_mempid_list) - { - mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); - mempid_list = mempid_list->next; - - find = NULL; - XGI_SCAN_PROCESS(p) - { - if (p->pid == mempid_block->pid) - { - XGI_INFO("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", mempid_block->pid, p->state, mempid_block->location, mempid_block->bus_addr); - find = p; - if (mempid_block->bus_addr == 0xFFFFFFFF) - ++cnt; - break; - } - } - if (!find) - { - if (mempid_block->location == LOCAL) - { - XGI_INFO("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr); - xgi_fb_free(info, mempid_block->bus_addr); - } - else if (mempid_block->bus_addr != 0xFFFFFFFF) - { - XGI_INFO("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! 
\n", mempid_block->pid, mempid_block->bus_addr); - xgi_pcie_free(info, mempid_block->bus_addr); - } - else - { - /*only delete the memory block*/ - list_del(&mempid_block->list); - XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_block->pid); - kfree(mempid_block); - } - } - } - *pcnt = cnt; -} + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" + +void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req) +{ + req->device_id = info->device_id; + req->device_name[0] = 'x'; + req->device_name[1] = 'g'; + req->device_name[2] = '4'; + req->device_name[3] = '7'; + req->vendor_id = info->vendor_id; + req->curr_display_mode = 0; + req->fb_size = info->fb.size; + req->sarea_bus_addr = info->sarea_info.bus_addr; + req->sarea_size = info->sarea_info.size; +} + +void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req) +{ + req->mmioBase = (void *)info->mmio.base; + req->size = info->mmio.size; +} + +void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +{ + info->scrn_info.scrn_start = req->scrn_start; + info->scrn_info.scrn_xres = req->scrn_xres; + info->scrn_info.scrn_yres = req->scrn_yres; + info->scrn_info.scrn_bpp = req->scrn_bpp; + info->scrn_info.scrn_pitch = req->scrn_pitch; + + XGI_INFO("info->scrn_info.scrn_start: 0x%lx" + "info->scrn_info.scrn_xres: 0x%lx" + "info->scrn_info.scrn_yres: 0x%lx" + "info->scrn_info.scrn_bpp: 0x%lx" + "info->scrn_info.scrn_pitch: 0x%lx\n", + info->scrn_info.scrn_start, + info->scrn_info.scrn_xres, + info->scrn_info.scrn_yres, + info->scrn_info.scrn_bpp, info->scrn_info.scrn_pitch); +} + +void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +{ + req->scrn_start = info->scrn_info.scrn_start; + req->scrn_xres = info->scrn_info.scrn_xres; + req->scrn_yres = info->scrn_info.scrn_yres; + req->scrn_bpp = info->scrn_info.scrn_bpp; + req->scrn_pitch = info->scrn_info.scrn_pitch; + + XGI_INFO("req->scrn_start: 0x%lx" + "req->scrn_xres: 0x%lx" + "req->scrn_yres: 0x%lx" + "req->scrn_bpp: 0x%lx" + "req->scrn_pitch: 0x%lx\n", + req->scrn_start, + req->scrn_xres, + req->scrn_yres, req->scrn_bpp, req->scrn_pitch); +} + +void xgi_ge_reset(xgi_info_t * 
info) +{ + xgi_disable_ge(info); + xgi_enable_ge(info); +} + +void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req) +{ + info->sarea_info.bus_addr = req->bus_addr; + info->sarea_info.size = req->size; + XGI_INFO("info->sarea_info.bus_addr: 0x%lx" + "info->sarea_info.size: 0x%lx\n", + info->sarea_info.bus_addr, info->sarea_info.size); +} + +/* + * irq functions + */ +#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff + +static U32 s_invalid_begin = 0; + +BOOL xgi_ge_irq_handler(xgi_info_t * info) +{ + volatile U8 *mmio_vbase = info->mmio.vbase; + volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); + U32 int_status = ge_3d_status[4]; // interrupt status + U32 auto_reset_count = 0; + BOOL is_support_auto_reset = FALSE; + + // Check GE on/off + if (0 == (0xffffc0f0 & int_status)) { + U32 old_ge_status = ge_3d_status[0x00]; + U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; + if (0 != (0x1000 & int_status)) { + // We got GE stall interrupt. + ge_3d_status[0x04] = int_status | 0x04000000; + + if (TRUE == is_support_auto_reset) { + BOOL is_wrong_signal = FALSE; + static U32 last_int_tick_low, + last_int_tick_high; + static U32 new_int_tick_low, new_int_tick_high; + static U32 continoue_int_count = 0; + // OE II is busy. 
+ while (old_ge_status & 0x001c0000) { + U16 check; + // Check Read back status + *(mmio_vbase + 0x235c) = 0x80; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if ((check & 0x3f) != + ((check & 0x3f00) >> 8)) { + is_wrong_signal = TRUE; + break; + } + // Check RO channel + *(mmio_vbase + 0x235c) = 0x83; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if ((check & 0x0f) != + ((check & 0xf0) >> 4)) { + is_wrong_signal = TRUE; + break; + } + // Check RW channel + *(mmio_vbase + 0x235c) = 0x88; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if ((check & 0x0f) != + ((check & 0xf0) >> 4)) { + is_wrong_signal = TRUE; + break; + } + // Check RO channel outstanding + *(mmio_vbase + 0x235c) = 0x8f; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if (0 != (check & 0x3ff)) { + is_wrong_signal = TRUE; + break; + } + // Check RW channel outstanding + *(mmio_vbase + 0x235c) = 0x90; + check = + *((volatile U16 *)(mmio_vbase + + 0x2360)); + if (0 != (check & 0x3ff)) { + is_wrong_signal = TRUE; + break; + } + // No pending PCIE request. GE stall. + break; + } + + if (is_wrong_signal) { + // Nothing but skip. + } else if (0 == continoue_int_count++) { + rdtsc(last_int_tick_low, + last_int_tick_high); + } else { + rdtscl(new_int_tick_low); + if ((new_int_tick_low - + last_int_tick_low) > + STALL_INTERRUPT_RESET_THRESHOLD) { + continoue_int_count = 0; + } else if (continoue_int_count >= 3) { + continoue_int_count = 0; + + // GE Hung up, need reset. + XGI_INFO("Reset GE!\n"); + + *(mmio_vbase + 0xb057) = 8; + int time_out = 0xffff; + while (0 != + (ge_3d_status[0x00] & + 0xf0000000)) { + while (0 != + ((--time_out) & + 0xfff)) ; + if (0 == time_out) { + XGI_INFO + ("Can not reset back 0x%lx!\n", + ge_3d_status + [0x00]); + *(mmio_vbase + + 0xb057) = 0; + // Have to use 3x5.36 to reset. 
+ // Save and close dynamic gating + U8 old_3ce = + *(mmio_vbase + + 0x3ce); + *(mmio_vbase + + 0x3ce) = 0x2a; + U8 old_3cf = + *(mmio_vbase + + 0x3cf); + *(mmio_vbase + + 0x3cf) = + old_3cf & 0xfe; + // Reset GE + U8 old_index = + *(mmio_vbase + + 0x3d4); + *(mmio_vbase + + 0x3d4) = 0x36; + U8 old_36 = + *(mmio_vbase + + 0x3d5); + *(mmio_vbase + + 0x3d5) = + old_36 | 0x10; + while (0 != + ((--time_out) & 0xfff)) ; + *(mmio_vbase + + 0x3d5) = + old_36; + *(mmio_vbase + + 0x3d4) = + old_index; + // Restore dynamic gating + *(mmio_vbase + + 0x3cf) = + old_3cf; + *(mmio_vbase + + 0x3ce) = + old_3ce; + break; + } + } + *(mmio_vbase + 0xb057) = 0; + + // Increase Reset counter + auto_reset_count++; + } + } + } + return TRUE; + } else if (0 != (0x1 & int_status)) { + s_invalid_begin++; + ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; + return TRUE; + } + } + return FALSE; +} + +BOOL xgi_crt_irq_handler(xgi_info_t * info) +{ + BOOL ret = FALSE; + U8 *mmio_vbase = info->mmio.vbase; + U32 device_status = 0; + U32 hw_status = 0; + U8 save_3ce = bReadReg(0x3ce); + + if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened + { + U8 op3cf_3d; + U8 op3cf_37; + + // What happened? + op3cf_37 = bIn3cf(0x37); + +#if 0 + if (op3cf_37 & 0x04) + device_status |= GDEVST_CONNECT; + else + device_status &= ~GDEVST_CONNECT; + + device_status |= GDEVST_DEVICE_CHANGED; + hw_status |= HWST_DEVICE_CHANGED; +#endif + // Clear CRT interrupt + op3cf_3d = bIn3cf(0x3d); + bOut3cf(0x3d, (op3cf_3d | 0x04)); + bOut3cf(0x3d, (op3cf_3d & ~0x04)); + ret = TRUE; + } + bWriteReg(0x3ce, save_3ce); + + return (ret); +} + +BOOL xgi_dvi_irq_handler(xgi_info_t * info) +{ + BOOL ret = FALSE; + U8 *mmio_vbase = info->mmio.vbase; + U32 device_status = 0; + U32 hw_status = 0; + U8 save_3ce = bReadReg(0x3ce); + + if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened + { + U8 op3cf_39; + U8 op3cf_37; + U8 op3x5_5a; + U8 save_3x4 = bReadReg(0x3d4);; + + // What happened? 
+ op3cf_37 = bIn3cf(0x37); +#if 0 + //Also update our internal flag + if (op3cf_37 & 0x10) // Second Monitor plugged In + { + device_status |= GDEVST_CONNECT; + //Because currenly we cannot determine if DVI digital + //or DVI analog is connected according to DVI interrupt + //We should still call BIOS to check it when utility ask us + device_status &= ~GDEVST_CHECKED; + } else { + device_status &= ~GDEVST_CONNECT; + } +#endif + //Notify BIOS that DVI plug/unplug happened + op3x5_5a = bIn3x5(0x5a); + bOut3x5(0x5a, op3x5_5a & 0xf7); + + bWriteReg(0x3d4, save_3x4); + + //device_status |= GDEVST_DEVICE_CHANGED; + //hw_status |= HWST_DEVICE_CHANGED; + + // Clear DVI interrupt + op3cf_39 = bIn3cf(0x39); + bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 + bOut3c5(0x39, (op3cf_39 | 0x01)); //Set 3cf.39 bit 0 to 1 + + ret = TRUE; + } + bWriteReg(0x3ce, save_3ce); + + return (ret); +} + +void xgi_dump_register(xgi_info_t * info) +{ + int i, j; + unsigned char temp; + + // 0x3C5 + printk("\r\n=====xgi_dump_register========0x%x===============\r\n", + 0x3C5); + + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bIn3c5(i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + // 0x3D5 + printk("\r\n====xgi_dump_register=========0x%x===============\r\n", + 0x3D5); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bIn3x5(i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + // 0x3CF + printk("\r\n=========xgi_dump_register====0x%x===============\r\n", + 0x3CF); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) 
{ + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bIn3cf(i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n=====xgi_dump_register======0x%x===============\r\n", + 0xB000); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x5; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bReadReg(0xB000 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2200); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0xB; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bReadReg(0x2200 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2300); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x7; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bReadReg(0x2300 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2400); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bReadReg(0x2400 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } + + printk("\r\n==================0x%x===============\r\n", 0x2800); + for (i = 0; i < 0x10; i++) { + if (i == 0) { + printk("%5x", i); + } else { + printk("%3x", i); + } + } + printk("\r\n"); + + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + temp = bReadReg(0x2800 + i * 0x10 + j); + printk("%3x", temp); + } + printk("\r\n"); + } 
+} + +void xgi_restore_registers(xgi_info_t * info) +{ + bOut3x5(0x13, 0); + bOut3x5(0x8b, 2); +} + +void xgi_waitfor_pci_idle(xgi_info_t * info) +{ +#define WHOLD_GE_STATUS 0x2800 +#define IDLE_MASK ~0x90200000 + + int idleCount = 0; + while (idleCount < 5) { + if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) { + idleCount = 0; + } else { + idleCount++; + } + } +} + +int xgi_get_cpu_id(struct cpu_info_s *arg) +{ + int op = arg->_eax; + __asm__("cpuid":"=a"(arg->_eax), + "=b"(arg->_ebx), + "=c"(arg->_ecx), "=d"(arg->_edx) + : "0"(op)); + + XGI_INFO + ("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n", + op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx); +} + +/*memory collect function*/ +extern struct list_head xgi_mempid_list; +void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt) +{ + xgi_mem_pid_t *mempid_block; + struct list_head *mempid_list; + struct task_struct *p, *find; + unsigned int cnt = 0; + + mempid_list = xgi_mempid_list.next; + + while (mempid_list != &xgi_mempid_list) { + mempid_block = + list_entry(mempid_list, struct xgi_mem_pid_s, list); + mempid_list = mempid_list->next; + + find = NULL; + XGI_SCAN_PROCESS(p) { + if (p->pid == mempid_block->pid) { + XGI_INFO + ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", + mempid_block->pid, p->state, + mempid_block->location, + mempid_block->bus_addr); + find = p; + if (mempid_block->bus_addr == 0xFFFFFFFF) + ++cnt; + break; + } + } + if (!find) { + if (mempid_block->location == LOCAL) { + XGI_INFO + ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", + mempid_block->pid, mempid_block->bus_addr); + xgi_fb_free(info, mempid_block->bus_addr); + } else if (mempid_block->bus_addr != 0xFFFFFFFF) { + XGI_INFO + ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! 
\n", + mempid_block->pid, mempid_block->bus_addr); + xgi_pcie_free(info, mempid_block->bus_addr); + } else { + /*only delete the memory block */ + list_del(&mempid_block->list); + XGI_INFO + ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", + mempid_block->pid); + kfree(mempid_block); + } + } + } + *pcnt = cnt; +} diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index ac4daaa1..37120aaa 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -1,49 +1,47 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - - -#ifndef _XGI_MISC_H_ -#define _XGI_MISC_H_ - -extern void xgi_dump_register(xgi_info_t *info); -extern void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t * req); -extern void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req); -extern void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req); -extern void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req); -extern void xgi_ge_reset(xgi_info_t *info); -extern void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req); -extern int xgi_get_cpu_id(struct cpu_info_s *arg); - -extern void xgi_restore_registers(xgi_info_t *info); -extern BOOL xgi_ge_irq_handler(xgi_info_t *info); -extern BOOL xgi_crt_irq_handler(xgi_info_t *info); -extern BOOL xgi_dvi_irq_handler(xgi_info_t *info); -extern void xgi_waitfor_pci_idle(xgi_info_t *info); - - -#endif + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_MISC_H_ +#define _XGI_MISC_H_ + +extern void xgi_dump_register(xgi_info_t * info); +extern void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req); +extern void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req); +extern void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req); +extern void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req); +extern void xgi_ge_reset(xgi_info_t * info); +extern void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req); +extern int xgi_get_cpu_id(struct cpu_info_s *arg); + +extern void xgi_restore_registers(xgi_info_t * info); +extern BOOL xgi_ge_irq_handler(xgi_info_t * info); +extern BOOL xgi_crt_irq_handler(xgi_info_t * info); +extern BOOL xgi_dvi_irq_handler(xgi_info_t * info); +extern void xgi_waitfor_pci_idle(xgi_info_t * info); + +#endif diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 62e2323f..9457770a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -1,1060 +1,1031 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - -#include "xgi_types.h" -#include "xgi_linux.h" -#include "xgi_drv.h" -#include "xgi_regs.h" -#include "xgi_pcie.h" -#include "xgi_misc.h" - -static xgi_pcie_heap_t *xgi_pcie_heap = NULL; -static kmem_cache_t *xgi_pcie_cache_block = NULL; -static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL; -static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL; -static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL; -extern struct list_head xgi_mempid_list; - -static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) -{ - struct page *page; - unsigned long page_addr = 0; - unsigned long page_count = 0; - int i; - - page_count = (1 << page_order); - page_addr = __get_free_pages(GFP_KERNEL, page_order); - - if (page_addr == 0UL) - { - XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n", - page_count); - return 0; - } - - page = virt_to_page(page_addr); - - for (i = 0; i < page_count; i++, page++) - { - XGI_INC_PAGE_COUNT(page); - XGILockPage(page); - } - - XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n", - page_count, page_order, page_addr); - return page_addr; -} - -static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) -{ - struct page *page; - unsigned long page_count = 0; - int i; - - page_count = (1 << page_order); - page = virt_to_page(page_addr); - - for (i = 0; i < page_count; i++, page++) - { - XGI_DEC_PAGE_COUNT(page); - XGIUnlockPage(page); - } - - free_pages(page_addr, page_order); -} - -static int xgi_pcie_lut_init(xgi_info_t *info) -{ - unsigned char *page_addr = NULL; - unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; - unsigned long count = 0; - u8 temp = 0; - - /* Jong 06/06/2006 */ - unsigned long pcie_aperture_size; - - info->pcie.size = 128 * 1024 * 1024; - - /* Get current FB aperture size */ - temp = In3x5(0x27); - XGI_INFO("In3x5(0x27): 0x%x \n", temp); - - if (temp & 0x01) /* 256MB; Jong 
06/05/2006; 0x10000000 */ - { - /* Jong 06/06/2006; allocate memory */ - pcie_aperture_size=256 * 1024 * 1024; - /* info->pcie.base = 256 * 1024 * 1024; */ /* pcie base is different from fb base */ - } - else /* 128MB; Jong 06/05/2006; 0x08000000 */ - { - /* Jong 06/06/2006; allocate memory */ - pcie_aperture_size=128 * 1024 * 1024; - /* info->pcie.base = 128 * 1024 * 1024; */ - } - - /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */ - /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */ - /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */ - /* info->pcie.base=ioremap(0x0F000000, 0x10000000); */ /* Cause system hang */ - info->pcie.base=pcie_aperture_size; /* works */ - /* info->pcie.base=info->fb.base + info->fb.size; */ /* System hang */ - /* info->pcie.base=128 * 1024 * 1024;*/ /* System hang */ - - XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base); - - - /* Get current lookup table page size */ - temp = bReadReg(0xB00C); - if (temp & 0x04) /* 8KB */ - { - info->lutPageSize = 8 * 1024; - } - else /* 4KB */ - { - info->lutPageSize = 4 * 1024; - } - - XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); - -#if 0 - /* Get current lookup table location */ - temp = bReadReg(0xB00C); - if (temp & 0x02) /* LFB */ - { - info->isLUTInLFB = TRUE; - /* Current we only support lookup table in LFB */ - temp &= 0xFD; - bWriteReg(0xB00C, temp); - info->isLUTInLFB = FALSE; - } - else /* SFB */ - { - info->isLUTInLFB = FALSE; - } - - XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); - - /* Get current SDFB page size */ - temp = bReadReg(0xB00C); - if (temp & 0x08) /* 8MB */ - { - info->sdfbPageSize = 8 * 1024 * 1024; - } - else /* 4MB */ - { - info->sdfbPageSize = 4 * 1024 * 1024; - } -#endif - pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE; - - /* - * Allocate memory for PCIE GART table; - */ - lutEntryNum = pciePageCount; - lutPageCount = (lutEntryNum * 4 
+ PAGE_SIZE - 1) / PAGE_SIZE; - - /* get page_order base on page_count */ - count = lutPageCount; - for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder); - - if ((lutPageCount << 1) == (1 << lutPageOrder)) - { - lutPageOrder -= 1; - } - - XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n", - lutEntryNum, lutPageCount, lutPageOrder); - - info->lutPageOrder = lutPageOrder; - page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder); - - if (!page_addr) - { - XGI_ERROR("cannot allocate PCIE lut page!\n"); - goto fail; - } - info->lut_base = (unsigned long *)page_addr; - - XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n", - page_addr, virt_to_phys(page_addr)); - - XGI_INFO("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n", - info->lut_base, __pa(info->lut_base), info->lutPageOrder); - - /* - * clean all PCIE GART Entry - */ - memset(page_addr, 0, PAGE_SIZE << lutPageOrder); - -#if defined(__i386__) || defined(__x86_64__) - asm volatile ( "wbinvd" ::: "memory" ); -#else - mb(); -#endif - - /* Set GART in SFB */ - bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02); - /* Set GART base address to HW */ - dwWriteReg(0xB034, __pa(info->lut_base)); - - return 1; -fail: - return 0; -} - -static void xgi_pcie_lut_cleanup(xgi_info_t *info) -{ - if (info->lut_base) - { - XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", - info->lut_base, info->lutPageOrder); - xgi_pcie_lut_free((unsigned long)info->lut_base, info->lutPageOrder); - info->lut_base = NULL; - } -} - -static xgi_pcie_block_t *xgi_pcie_new_node(void) -{ - xgi_pcie_block_t *block = (xgi_pcie_block_t *)kmem_cache_alloc(xgi_pcie_cache_block, GFP_KERNEL); - if (block == NULL) - { - return NULL; - } - - block->offset = 0; /* block's offset in pcie memory, begin from 0 */ - block->size = 0; /* The block size. 
*/ - block->bus_addr = 0; /* CPU access address/bus address */ - block->hw_addr = 0; /* GE access address */ - block->page_count = 0; - block->page_order = 0; - block->page_block = NULL; - block->page_table = NULL; - block->owner = PCIE_INVALID; - - return block; -} - -static void xgi_pcie_block_stuff_free(xgi_pcie_block_t *block) -{ - struct page *page; - xgi_page_block_t *page_block = block->page_block; - xgi_page_block_t *free_block; - unsigned long page_count = 0; - int i; - - //XGI_INFO("block->page_block: 0x%p \n", block->page_block); - while (page_block) - { - page_count = page_block->page_count; - - page = virt_to_page(page_block->virt_addr); - for (i = 0; i < page_count; i++, page++) - { - XGI_DEC_PAGE_COUNT(page); - XGIUnlockPage(page); - } - free_pages(page_block->virt_addr, page_block->page_order); - - page_block->phys_addr = 0; - page_block->virt_addr = 0; - page_block->page_count = 0; - page_block->page_order = 0; - - free_block = page_block; - page_block = page_block->next; - //XGI_INFO("free free_block: 0x%p \n", free_block); - kfree(free_block); - free_block = NULL; - } - - if (block->page_table) - { - //XGI_INFO("free block->page_table: 0x%p \n", block->page_table); - kfree(block->page_table); - block->page_table = NULL; - } -} - -int xgi_pcie_heap_init(xgi_info_t *info) -{ - xgi_pcie_block_t *block; - - if (!xgi_pcie_lut_init(info)) - { - XGI_ERROR("xgi_pcie_lut_init failed\n"); - return 0; - } - - xgi_pcie_heap = (xgi_pcie_heap_t *)kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL); - if(!xgi_pcie_heap) - { - XGI_ERROR("xgi_pcie_heap alloc failed\n"); - goto fail1; - } - INIT_LIST_HEAD(&xgi_pcie_heap->free_list); - INIT_LIST_HEAD(&xgi_pcie_heap->used_list); - INIT_LIST_HEAD(&xgi_pcie_heap->sort_list); - - xgi_pcie_heap->max_freesize = info->pcie.size; - - xgi_pcie_cache_block = kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), - 0, SLAB_HWCACHE_ALIGN, NULL, NULL); - - if (NULL == xgi_pcie_cache_block) - { - XGI_ERROR("Fail to creat 
xgi_pcie_block\n"); - goto fail2; - } - - block = (xgi_pcie_block_t *)xgi_pcie_new_node(); - if (!block) - { - XGI_ERROR("xgi_pcie_new_node failed\n"); - goto fail3; - } - - block->offset = 0; /* block's offset in pcie memory, begin from 0 */ - block->size = info->pcie.size; - - list_add(&block->list, &xgi_pcie_heap->free_list); - - XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", block->offset, block->size); - return 1; -fail3: - if (xgi_pcie_cache_block) - { - kmem_cache_destroy(xgi_pcie_cache_block); - xgi_pcie_cache_block = NULL; - } - -fail2: - if(xgi_pcie_heap) - { - kfree(xgi_pcie_heap); - xgi_pcie_heap = NULL; - } -fail1: - xgi_pcie_lut_cleanup(info); - return 0; -} - -void xgi_pcie_heap_check(void) -{ - struct list_head *useList, *temp; - xgi_pcie_block_t *block; - unsigned int ownerIndex; - char *ownerStr[6] = {"2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE"}; - - if (xgi_pcie_heap) - { - useList = &xgi_pcie_heap->used_list; - temp = useList->next; - XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); - while (temp != useList) - { - block = list_entry(temp, struct xgi_pcie_block_s, list); - if (block->owner == PCIE_2D) - ownerIndex = 0; - else if (block->owner > PCIE_3D_TEXTURE || block->owner < PCIE_2D || block->owner < PCIE_3D) - ownerIndex = 5; - else - ownerIndex = block->owner - PCIE_3D + 1; - XGI_INFO("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n", - ownerStr[ownerIndex], block->offset, block->size); - temp = temp->next; - } - - } -} - - -void xgi_pcie_heap_cleanup(xgi_info_t *info) -{ - struct list_head *free_list, *temp; - xgi_pcie_block_t *block; - int j; - - xgi_pcie_lut_cleanup(info); - XGI_INFO("xgi_pcie_lut_cleanup scceeded\n"); - - if (xgi_pcie_heap) - { - free_list = &xgi_pcie_heap->free_list; - for (j = 0; j < 3; j++, free_list++) - { - temp = free_list->next; - - while (temp != free_list) - { - block = list_entry(temp, struct xgi_pcie_block_s, list); - XGI_INFO("No. 
%d block->offset: 0x%lx block->size: 0x%lx \n", - j, block->offset, block->size); - xgi_pcie_block_stuff_free(block); - block->bus_addr = 0; - block->hw_addr = 0; - - temp = temp->next; - //XGI_INFO("No. %d free block: 0x%p \n", j, block); - kmem_cache_free(xgi_pcie_cache_block, block); - block = NULL; - } - } - - XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap); - kfree(xgi_pcie_heap); - xgi_pcie_heap = NULL; - } - - if (xgi_pcie_cache_block) - { - kmem_cache_destroy(xgi_pcie_cache_block); - xgi_pcie_cache_block = NULL; - } -} - - -static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t *info, - unsigned long originalSize, - enum PcieOwner owner) -{ - struct list_head *free_list; - xgi_pcie_block_t *block, *used_block, *free_block; - xgi_page_block_t *page_block, *prev_page_block; - struct page *page; - unsigned long page_order = 0, count = 0, index =0; - unsigned long page_addr = 0; - unsigned long *lut_addr = NULL; - unsigned long lut_id = 0; - unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; - int i, j, page_count = 0; - int temp = 0; - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n"); - XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); - - if (owner == PCIE_3D) - { - if (xgi_pcie_vertex_block) - { - XGI_INFO("PCIE Vertex has been created, return directly.\n"); - return xgi_pcie_vertex_block; - } - } - - if (owner == PCIE_3D_CMDLIST) - { - if (xgi_pcie_cmdlist_block) - { - XGI_INFO("PCIE Cmdlist has been created, return directly.\n"); - return xgi_pcie_cmdlist_block; - } - } - - if (owner == PCIE_3D_SCRATCHPAD) - { - if (xgi_pcie_scratchpad_block) - { - XGI_INFO("PCIE Scratchpad has been created, return directly.\n"); - return xgi_pcie_scratchpad_block; - } - } - - if (size == 0) - { - XGI_ERROR("size == 0 \n"); - return (NULL); - } - - XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize); - if (size > xgi_pcie_heap->max_freesize) - { - XGI_ERROR("size: 0x%lx bigger than PCIE total 
free size: 0x%lx.\n", - size, xgi_pcie_heap->max_freesize); - return (NULL); - } - - /* Jong 05/30/2006; find next free list which has enough space*/ - free_list = xgi_pcie_heap->free_list.next; - while (free_list != &xgi_pcie_heap->free_list) - { - //XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_pcie_block_s, list); - if (size <= block->size) - { - break; - } - free_list = free_list->next; - } - - if (free_list == &xgi_pcie_heap->free_list) - { - XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", size/1024); - return (NULL); - } - - free_block = block; - XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", - size, free_block->offset, free_block->size); - - if (size == free_block->size) - { - used_block = free_block; - XGI_INFO("size==free_block->size: free_block = 0x%p\n", free_block); - list_del(&free_block->list); - } - else - { - used_block = xgi_pcie_new_node(); - if (used_block == NULL) - { - return NULL; - } - - if (used_block == free_block) - { - XGI_ERROR("used_block == free_block = 0x%p\n", used_block); - } - - used_block->offset = free_block->offset; - used_block->size = size; - - free_block->offset += size; - free_block->size -= size; - } - - xgi_pcie_heap->max_freesize -= size; - - used_block->bus_addr = info->pcie.base + used_block->offset; - used_block->hw_addr = info->pcie.base + used_block->offset; - used_block->page_count = page_count = size / PAGE_SIZE; - - /* get page_order base on page_count */ - for (used_block->page_order = 0; page_count; page_count >>= 1) - { - ++used_block->page_order; - } - - if ((used_block->page_count << 1) == (1 << used_block->page_order)) - { - used_block->page_order--; - } - XGI_INFO("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n", - used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr, used_block->page_count, 
used_block->page_order); - - used_block->page_block = NULL; - //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL); - //if (!used_block->page_block) return NULL; - //used_block->page_block->next = NULL; - - used_block->page_table = (xgi_pte_t *)kmalloc(sizeof(xgi_pte_t) * used_block->page_count, GFP_KERNEL); - if (used_block->page_table == NULL) - { - goto fail; - } - - lut_id = (used_block->offset >> PAGE_SHIFT); - lut_addr = info->lut_base; - lut_addr += lut_id; - XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id); - - /* alloc free pages from system */ - page_count = used_block->page_count; - page_block = used_block->page_block; - prev_page_block = used_block->page_block; - for (i = 0; page_count > 0; i++) - { - /* if size is bigger than 2M bytes, it should be split */ - if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) - { - page_order = XGI_PCIE_ALLOC_MAX_ORDER; - } - else - { - count = page_count; - for (page_order = 0; count; count >>= 1, ++page_order); - - if ((page_count << 1) == (1 << page_order)) - { - page_order -= 1; - } - } - - count = (1 << page_order); - page_addr = __get_free_pages(GFP_KERNEL, page_order); - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", page_addr); - - if (!page_addr) - { - XGI_ERROR("No: %d :Can't get free pages: 0x%lx from system memory !\n", - i, count); - goto fail; - } - - /* Jong 05/30/2006; test */ - memset((unsigned char *)page_addr, 0xFF, PAGE_SIZE << page_order); - /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */ - - if (page_block == NULL) - { - page_block = (xgi_page_block_t *)kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL); - if (!page_block) - { - XGI_ERROR("Can't get memory for page_block! 
\n"); - goto fail; - } - } - - if (prev_page_block == NULL) - { - used_block->page_block = page_block; - prev_page_block = page_block; - } - else - { - prev_page_block->next = page_block; - prev_page_block = page_block; - } - - page_block->next = NULL; - page_block->phys_addr = __pa(page_addr); - page_block->virt_addr = page_addr; - page_block->page_count = count; - page_block->page_order = page_order; - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", page_block->phys_addr); - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", page_block->virt_addr); - - page = virt_to_page(page_addr); - - //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p" - // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n", - // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr); - - for (j = 0 ; j < count; j++, page++, lut_addr++) - { - used_block->page_table[index + j].phys_addr = __pa(page_address(page)); - used_block->page_table[index + j].virt_addr = (unsigned long)page_address(page); - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", used_block->page_table[index + j].phys_addr); - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", used_block->page_table[index + j].virt_addr); - - *lut_addr = __pa(page_address(page)); - XGI_INC_PAGE_COUNT(page); - XGILockPage(page); - - if (temp) - { - XGI_INFO("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n", - __pa(page_address(page)), lut_addr, j, *lut_addr); - temp--; - } - } - - page_block = page_block->next; - page_count -= count; - index += count; - temp = 0; - } - - used_block->owner = owner; - list_add(&used_block->list, &xgi_pcie_heap->used_list); - -#if defined(__i386__) || defined(__x86_64__) - asm volatile ( "wbinvd" ::: "memory" ); -#else - mb(); -#endif - - /* Flush 
GART Table */ - bWriteReg(0xB03F, 0x40); - bWriteReg(0xB03F, 0x00); - - if (owner == PCIE_3D) - { - xgi_pcie_vertex_block = used_block; - } - - if (owner == PCIE_3D_CMDLIST) - { - xgi_pcie_cmdlist_block = used_block; - } - - if (owner == PCIE_3D_SCRATCHPAD) - { - xgi_pcie_scratchpad_block = used_block; - } - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n"); - return (used_block); - -fail: - xgi_pcie_block_stuff_free(used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - return NULL; -} - -static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t *info, unsigned long offset) -{ - struct list_head *free_list, *used_list; - xgi_pcie_block_t *used_block, *block = NULL; - xgi_pcie_block_t *prev, *next; - unsigned long upper, lower; - - used_list = xgi_pcie_heap->used_list.next; - while (used_list != &xgi_pcie_heap->used_list) - { - block = list_entry(used_list, struct xgi_pcie_block_s, list); - if (block->offset == offset) - { - break; - } - used_list = used_list->next; - } - - if (used_list == &xgi_pcie_heap->used_list) - { - XGI_ERROR("can't find block: 0x%lx to free!\n", offset); - return (NULL); - } - - used_block = block; - XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n", - used_block, used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr); - - xgi_pcie_block_stuff_free(used_block); - - /* update xgi_pcie_heap */ - xgi_pcie_heap->max_freesize += used_block->size; - - prev = next = NULL; - upper = used_block->offset + used_block->size; - lower = used_block->offset; - - free_list = xgi_pcie_heap->free_list.next; - - while (free_list != &xgi_pcie_heap->free_list) - { - block = list_entry(free_list, struct xgi_pcie_block_s, list); - if (block->offset == upper) - { - next = block; - } - else if ((block->offset + block->size) == lower) - { - prev = block; - } - free_list = free_list->next; - } - - XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); - list_del(&used_block->list); - - if 
(prev && next) - { - prev->size += (used_block->size + next->size); - list_del(&next->list); - XGI_INFO("free node 0x%p\n", next); - kmem_cache_free(xgi_pcie_cache_block, next); - kmem_cache_free(xgi_pcie_cache_block, used_block); - next = NULL; - used_block = NULL; - return (prev); - } - - if (prev) - { - prev->size += used_block->size; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - used_block = NULL; - return (prev); - } - - if (next) - { - next->size += used_block->size; - next->offset = used_block->offset; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - used_block = NULL; - return (next); - } - - used_block->bus_addr = 0; - used_block->hw_addr = 0; - used_block->page_count = 0; - used_block->page_order = 0; - list_add(&used_block->list, &xgi_pcie_heap->free_list); - XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - return (used_block); -} - -void xgi_pcie_alloc(xgi_info_t *info, unsigned long size, - enum PcieOwner owner, xgi_mem_alloc_t *alloc) -{ - xgi_pcie_block_t *block; - xgi_mem_pid_t *mempid_block; - - xgi_down(info->pcie_sem); - block = xgi_pcie_mem_alloc(info, size, owner); - xgi_up(info->pcie_sem); - - if (block == NULL) - { - alloc->location = INVALID; - alloc->size = 0; - alloc->bus_addr = 0; - alloc->hw_addr = 0; - XGI_ERROR("PCIE RAM allocation failed\n"); - } - else - { - XGI_INFO("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", - block->offset, block->bus_addr); - alloc->location = NON_LOCAL; - alloc->size = block->size; - alloc->bus_addr = block->bus_addr; - alloc->hw_addr = block->hw_addr; - - /* - manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE. - PCIE_3D request means a opengl process created. - PCIE_3D_TEXTURE request means texture cannot alloc from fb. 
- */ - if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) - { - mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); - if (!mempid_block) - XGI_ERROR("mempid_block alloc failed\n"); - mempid_block->location = NON_LOCAL; - if (owner == PCIE_3D) - mempid_block->bus_addr = 0xFFFFFFFF;/*xgi_pcie_vertex_block has the address*/ - else - mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = alloc->pid; - - XGI_INFO("Memory ProcessID add one pcie block pid:%ld successfully! \n", mempid_block->pid); - list_add(&mempid_block->list, &xgi_mempid_list); - } - } -} - -void xgi_pcie_free(xgi_info_t *info, unsigned long bus_addr) -{ - xgi_pcie_block_t *block; - unsigned long offset = bus_addr - info->pcie.base; - xgi_mem_pid_t *mempid_block; - xgi_mem_pid_t *mempid_freeblock = NULL; - struct list_head *mempid_list; - char isvertex = 0; - int processcnt; - - if (xgi_pcie_vertex_block && xgi_pcie_vertex_block->bus_addr == bus_addr) - isvertex = 1; - - if (isvertex) - { - /*check is there any other process using vertex*/ - processcnt = 0; - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) - { - mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); - if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF) - { - ++processcnt; - } - mempid_list = mempid_list->next; - } - if (processcnt > 1) - { - return; - } - } - - xgi_down(info->pcie_sem); - block = xgi_pcie_mem_free(info, offset); - xgi_up(info->pcie_sem); - - if (block == NULL) - { - XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); - } - - if (isvertex) - xgi_pcie_vertex_block = NULL; - - /* manage mempid */ - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) - { - mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list); - if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr))) - { - mempid_freeblock = 
mempid_block; - break; - } - mempid_list = mempid_list->next; - } - if (mempid_freeblock) - { - list_del(&mempid_freeblock->list); - XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_freeblock->pid); - kfree(mempid_freeblock); - } -} - -/* - * given a bus address, fid the pcie mem block - * uses the bus address as the key. - */ -void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address) -{ - struct list_head *used_list; - xgi_pcie_block_t *block; - int i; - - used_list = xgi_pcie_heap->used_list.next; - - while (used_list != &xgi_pcie_heap->used_list) - { - block = list_entry(used_list, struct xgi_pcie_block_s, list); - - if (block->bus_addr == address) - { - return block; - } - - if (block->page_table) - { - for (i = 0; i < block->page_count; i++) - { - unsigned long offset = block->bus_addr; - if ( (address >= offset) && (address < (offset + PAGE_SIZE))) - { - return block; - } - } - } - used_list = used_list->next; - } - - XGI_ERROR("could not find map for vm 0x%lx\n", address); - - return NULL; -} - -/* - address -- GE HW address - return -- CPU virtual address - - assume the CPU VAddr is continuous in not the same block -*/ -void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address) -{ - struct list_head *used_list; - xgi_pcie_block_t *block; - unsigned long offset_in_page; - unsigned long loc_in_pagetable; - void * ret; - - XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n"); - - used_list = xgi_pcie_heap->used_list.next; - XGI_INFO("Jong_05292006-used_list=%ul\n", used_list); - - offset_in_page = address & (PAGE_SIZE-1); - XGI_INFO("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", address, PAGE_SIZE-1, offset_in_page); - - while (used_list != &xgi_pcie_heap->used_list) - { - block = list_entry(used_list, struct xgi_pcie_block_s, list); - XGI_INFO("Jong_05292006-block=0x%px\n", block); - XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", block->hw_addr); - XGI_INFO("Jong_05292006- 
block->size=%ul\n", block->size); - - if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size))) - { - loc_in_pagetable = (address - block->hw_addr) >> PAGE_SHIFT; - ret = (void*)(block->page_table[loc_in_pagetable].virt_addr + offset_in_page); - - XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT); - XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", loc_in_pagetable); - XGI_INFO("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", block->page_table[loc_in_pagetable].virt_addr); - XGI_INFO("Jong_05292006-offset_in_page=%d\n", offset_in_page); - XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", ret); - - return ret ; - } - else - { - XGI_INFO("Jong_05292006-used_list = used_list->next;\n"); - used_list = used_list->next; - } - } - - XGI_ERROR("could not find map for vm 0x%lx\n", address); - return NULL; -} - - -void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req) -{ - -} - -void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req) -{ -} - -/* - address -- GE hw address -*/ -void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address) -{ - unsigned long * virtaddr = 0; - if (address == 0) - { - XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n"); - return; - } - - virtaddr = (unsigned long *) xgi_find_pcie_virt(info, address); - - XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address); - XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr); - XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr); - if (virtaddr != NULL) - { - *virtaddr = 0x00f00fff; - } - - XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr); -} - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" + +static xgi_pcie_heap_t *xgi_pcie_heap = NULL; +static kmem_cache_t *xgi_pcie_cache_block = NULL; +static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL; +static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL; +static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL; +extern struct list_head xgi_mempid_list; + +static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) +{ + struct page *page; + unsigned long page_addr = 0; + unsigned long page_count = 0; + int i; + + page_count = (1 << page_order); + page_addr = __get_free_pages(GFP_KERNEL, page_order); + + if (page_addr == 0UL) { + XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n", + page_count); + return 0; + } + + page = virt_to_page(page_addr); + + for (i = 0; i < page_count; i++, page++) { + XGI_INC_PAGE_COUNT(page); + XGILockPage(page); + } + + XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n", + page_count, page_order, page_addr); + return page_addr; +} + +static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) +{ + struct page *page; + unsigned long page_count = 0; + int i; + + page_count = (1 << page_order); + page = virt_to_page(page_addr); + + for (i = 0; i < page_count; i++, page++) { + XGI_DEC_PAGE_COUNT(page); + XGIUnlockPage(page); + } + + free_pages(page_addr, page_order); +} + +static int xgi_pcie_lut_init(xgi_info_t * info) +{ + unsigned char *page_addr = NULL; + unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; + unsigned long count = 0; + u8 temp = 0; + + /* Jong 06/06/2006 */ + unsigned long pcie_aperture_size; + + info->pcie.size = 128 * 1024 * 1024; + + /* Get current FB aperture size */ + temp = In3x5(0x27); + XGI_INFO("In3x5(0x27): 0x%x \n", temp); + + if (temp & 0x01) { /* 256MB; Jong 
06/05/2006; 0x10000000 */ + /* Jong 06/06/2006; allocate memory */ + pcie_aperture_size = 256 * 1024 * 1024; + /* info->pcie.base = 256 * 1024 * 1024; *//* pcie base is different from fb base */ + } else { /* 128MB; Jong 06/05/2006; 0x08000000 */ + + /* Jong 06/06/2006; allocate memory */ + pcie_aperture_size = 128 * 1024 * 1024; + /* info->pcie.base = 128 * 1024 * 1024; */ + } + + /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */ + /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */ + /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */ + /* info->pcie.base=ioremap(0x0F000000, 0x10000000); *//* Cause system hang */ + info->pcie.base = pcie_aperture_size; /* works */ + /* info->pcie.base=info->fb.base + info->fb.size; *//* System hang */ + /* info->pcie.base=128 * 1024 * 1024; *//* System hang */ + + XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base); + + /* Get current lookup table page size */ + temp = bReadReg(0xB00C); + if (temp & 0x04) { /* 8KB */ + info->lutPageSize = 8 * 1024; + } else { /* 4KB */ + + info->lutPageSize = 4 * 1024; + } + + XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + +#if 0 + /* Get current lookup table location */ + temp = bReadReg(0xB00C); + if (temp & 0x02) { /* LFB */ + info->isLUTInLFB = TRUE; + /* Current we only support lookup table in LFB */ + temp &= 0xFD; + bWriteReg(0xB00C, temp); + info->isLUTInLFB = FALSE; + } else { /* SFB */ + + info->isLUTInLFB = FALSE; + } + + XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + + /* Get current SDFB page size */ + temp = bReadReg(0xB00C); + if (temp & 0x08) { /* 8MB */ + info->sdfbPageSize = 8 * 1024 * 1024; + } else { /* 4MB */ + + info->sdfbPageSize = 4 * 1024 * 1024; + } +#endif + pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE; + + /* + * Allocate memory for PCIE GART table; + */ + lutEntryNum = pciePageCount; + lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) 
/ PAGE_SIZE; + + /* get page_order base on page_count */ + count = lutPageCount; + for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder) ; + + if ((lutPageCount << 1) == (1 << lutPageOrder)) { + lutPageOrder -= 1; + } + + XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n", + lutEntryNum, lutPageCount, lutPageOrder); + + info->lutPageOrder = lutPageOrder; + page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder); + + if (!page_addr) { + XGI_ERROR("cannot allocate PCIE lut page!\n"); + goto fail; + } + info->lut_base = (unsigned long *)page_addr; + + XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n", + page_addr, virt_to_phys(page_addr)); + + XGI_INFO + ("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n", + info->lut_base, __pa(info->lut_base), info->lutPageOrder); + + /* + * clean all PCIE GART Entry + */ + memset(page_addr, 0, PAGE_SIZE << lutPageOrder); + +#if defined(__i386__) || defined(__x86_64__) + asm volatile ("wbinvd":::"memory"); +#else + mb(); +#endif + + /* Set GART in SFB */ + bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02); + /* Set GART base address to HW */ + dwWriteReg(0xB034, __pa(info->lut_base)); + + return 1; + fail: + return 0; +} + +static void xgi_pcie_lut_cleanup(xgi_info_t * info) +{ + if (info->lut_base) { + XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", + info->lut_base, info->lutPageOrder); + xgi_pcie_lut_free((unsigned long)info->lut_base, + info->lutPageOrder); + info->lut_base = NULL; + } +} + +static xgi_pcie_block_t *xgi_pcie_new_node(void) +{ + xgi_pcie_block_t *block = + (xgi_pcie_block_t *) kmem_cache_alloc(xgi_pcie_cache_block, + GFP_KERNEL); + if (block == NULL) { + return NULL; + } + + block->offset = 0; /* block's offset in pcie memory, begin from 0 */ + block->size = 0; /* The block size. 
*/ + block->bus_addr = 0; /* CPU access address/bus address */ + block->hw_addr = 0; /* GE access address */ + block->page_count = 0; + block->page_order = 0; + block->page_block = NULL; + block->page_table = NULL; + block->owner = PCIE_INVALID; + + return block; +} + +static void xgi_pcie_block_stuff_free(xgi_pcie_block_t * block) +{ + struct page *page; + xgi_page_block_t *page_block = block->page_block; + xgi_page_block_t *free_block; + unsigned long page_count = 0; + int i; + + //XGI_INFO("block->page_block: 0x%p \n", block->page_block); + while (page_block) { + page_count = page_block->page_count; + + page = virt_to_page(page_block->virt_addr); + for (i = 0; i < page_count; i++, page++) { + XGI_DEC_PAGE_COUNT(page); + XGIUnlockPage(page); + } + free_pages(page_block->virt_addr, page_block->page_order); + + page_block->phys_addr = 0; + page_block->virt_addr = 0; + page_block->page_count = 0; + page_block->page_order = 0; + + free_block = page_block; + page_block = page_block->next; + //XGI_INFO("free free_block: 0x%p \n", free_block); + kfree(free_block); + free_block = NULL; + } + + if (block->page_table) { + //XGI_INFO("free block->page_table: 0x%p \n", block->page_table); + kfree(block->page_table); + block->page_table = NULL; + } +} + +int xgi_pcie_heap_init(xgi_info_t * info) +{ + xgi_pcie_block_t *block; + + if (!xgi_pcie_lut_init(info)) { + XGI_ERROR("xgi_pcie_lut_init failed\n"); + return 0; + } + + xgi_pcie_heap = + (xgi_pcie_heap_t *) kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL); + if (!xgi_pcie_heap) { + XGI_ERROR("xgi_pcie_heap alloc failed\n"); + goto fail1; + } + INIT_LIST_HEAD(&xgi_pcie_heap->free_list); + INIT_LIST_HEAD(&xgi_pcie_heap->used_list); + INIT_LIST_HEAD(&xgi_pcie_heap->sort_list); + + xgi_pcie_heap->max_freesize = info->pcie.size; + + xgi_pcie_cache_block = + kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + + if (NULL == xgi_pcie_cache_block) { + XGI_ERROR("Fail to creat 
xgi_pcie_block\n"); + goto fail2; + } + + block = (xgi_pcie_block_t *) xgi_pcie_new_node(); + if (!block) { + XGI_ERROR("xgi_pcie_new_node failed\n"); + goto fail3; + } + + block->offset = 0; /* block's offset in pcie memory, begin from 0 */ + block->size = info->pcie.size; + + list_add(&block->list, &xgi_pcie_heap->free_list); + + XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", + block->offset, block->size); + return 1; + fail3: + if (xgi_pcie_cache_block) { + kmem_cache_destroy(xgi_pcie_cache_block); + xgi_pcie_cache_block = NULL; + } + + fail2: + if (xgi_pcie_heap) { + kfree(xgi_pcie_heap); + xgi_pcie_heap = NULL; + } + fail1: + xgi_pcie_lut_cleanup(info); + return 0; +} + +void xgi_pcie_heap_check(void) +{ + struct list_head *useList, *temp; + xgi_pcie_block_t *block; + unsigned int ownerIndex; + char *ownerStr[6] = + { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" }; + + if (xgi_pcie_heap) { + useList = &xgi_pcie_heap->used_list; + temp = useList->next; + XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); + while (temp != useList) { + block = list_entry(temp, struct xgi_pcie_block_s, list); + if (block->owner == PCIE_2D) + ownerIndex = 0; + else if (block->owner > PCIE_3D_TEXTURE + || block->owner < PCIE_2D + || block->owner < PCIE_3D) + ownerIndex = 5; + else + ownerIndex = block->owner - PCIE_3D + 1; + XGI_INFO + ("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n", + ownerStr[ownerIndex], block->offset, block->size); + temp = temp->next; + } + + } +} + +void xgi_pcie_heap_cleanup(xgi_info_t * info) +{ + struct list_head *free_list, *temp; + xgi_pcie_block_t *block; + int j; + + xgi_pcie_lut_cleanup(info); + XGI_INFO("xgi_pcie_lut_cleanup scceeded\n"); + + if (xgi_pcie_heap) { + free_list = &xgi_pcie_heap->free_list; + for (j = 0; j < 3; j++, free_list++) { + temp = free_list->next; + + while (temp != free_list) { + block = + list_entry(temp, struct xgi_pcie_block_s, + list); + XGI_INFO + ("No. 
%d block->offset: 0x%lx block->size: 0x%lx \n", + j, block->offset, block->size); + xgi_pcie_block_stuff_free(block); + block->bus_addr = 0; + block->hw_addr = 0; + + temp = temp->next; + //XGI_INFO("No. %d free block: 0x%p \n", j, block); + kmem_cache_free(xgi_pcie_cache_block, block); + block = NULL; + } + } + + XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap); + kfree(xgi_pcie_heap); + xgi_pcie_heap = NULL; + } + + if (xgi_pcie_cache_block) { + kmem_cache_destroy(xgi_pcie_cache_block); + xgi_pcie_cache_block = NULL; + } +} + +static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, + unsigned long originalSize, + enum PcieOwner owner) +{ + struct list_head *free_list; + xgi_pcie_block_t *block, *used_block, *free_block; + xgi_page_block_t *page_block, *prev_page_block; + struct page *page; + unsigned long page_order = 0, count = 0, index = 0; + unsigned long page_addr = 0; + unsigned long *lut_addr = NULL; + unsigned long lut_id = 0; + unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; + int i, j, page_count = 0; + int temp = 0; + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n"); + XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", + originalSize, size); + + if (owner == PCIE_3D) { + if (xgi_pcie_vertex_block) { + XGI_INFO + ("PCIE Vertex has been created, return directly.\n"); + return xgi_pcie_vertex_block; + } + } + + if (owner == PCIE_3D_CMDLIST) { + if (xgi_pcie_cmdlist_block) { + XGI_INFO + ("PCIE Cmdlist has been created, return directly.\n"); + return xgi_pcie_cmdlist_block; + } + } + + if (owner == PCIE_3D_SCRATCHPAD) { + if (xgi_pcie_scratchpad_block) { + XGI_INFO + ("PCIE Scratchpad has been created, return directly.\n"); + return xgi_pcie_scratchpad_block; + } + } + + if (size == 0) { + XGI_ERROR("size == 0 \n"); + return (NULL); + } + + XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize); + if (size > xgi_pcie_heap->max_freesize) { + XGI_ERROR + ("size: 0x%lx bigger than PCIE total free 
size: 0x%lx.\n", + size, xgi_pcie_heap->max_freesize); + return (NULL); + } + + /* Jong 05/30/2006; find next free list which has enough space */ + free_list = xgi_pcie_heap->free_list.next; + while (free_list != &xgi_pcie_heap->free_list) { + //XGI_INFO("free_list: 0x%px \n", free_list); + block = list_entry(free_list, struct xgi_pcie_block_s, list); + if (size <= block->size) { + break; + } + free_list = free_list->next; + } + + if (free_list == &xgi_pcie_heap->free_list) { + XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", + size / 1024); + return (NULL); + } + + free_block = block; + XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + size, free_block->offset, free_block->size); + + if (size == free_block->size) { + used_block = free_block; + XGI_INFO("size==free_block->size: free_block = 0x%p\n", + free_block); + list_del(&free_block->list); + } else { + used_block = xgi_pcie_new_node(); + if (used_block == NULL) { + return NULL; + } + + if (used_block == free_block) { + XGI_ERROR("used_block == free_block = 0x%p\n", + used_block); + } + + used_block->offset = free_block->offset; + used_block->size = size; + + free_block->offset += size; + free_block->size -= size; + } + + xgi_pcie_heap->max_freesize -= size; + + used_block->bus_addr = info->pcie.base + used_block->offset; + used_block->hw_addr = info->pcie.base + used_block->offset; + used_block->page_count = page_count = size / PAGE_SIZE; + + /* get page_order base on page_count */ + for (used_block->page_order = 0; page_count; page_count >>= 1) { + ++used_block->page_order; + } + + if ((used_block->page_count << 1) == (1 << used_block->page_order)) { + used_block->page_order--; + } + XGI_INFO + ("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n", + used_block->offset, used_block->size, used_block->bus_addr, + used_block->hw_addr, used_block->page_count, + 
used_block->page_order); + + used_block->page_block = NULL; + //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL); + //if (!used_block->page_block) return NULL; + //used_block->page_block->next = NULL; + + used_block->page_table = + (xgi_pte_t *) kmalloc(sizeof(xgi_pte_t) * used_block->page_count, + GFP_KERNEL); + if (used_block->page_table == NULL) { + goto fail; + } + + lut_id = (used_block->offset >> PAGE_SHIFT); + lut_addr = info->lut_base; + lut_addr += lut_id; + XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id); + + /* alloc free pages from system */ + page_count = used_block->page_count; + page_block = used_block->page_block; + prev_page_block = used_block->page_block; + for (i = 0; page_count > 0; i++) { + /* if size is bigger than 2M bytes, it should be split */ + if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) { + page_order = XGI_PCIE_ALLOC_MAX_ORDER; + } else { + count = page_count; + for (page_order = 0; count; count >>= 1, ++page_order) ; + + if ((page_count << 1) == (1 << page_order)) { + page_order -= 1; + } + } + + count = (1 << page_order); + page_addr = __get_free_pages(GFP_KERNEL, page_order); + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", + page_addr); + + if (!page_addr) { + XGI_ERROR + ("No: %d :Can't get free pages: 0x%lx from system memory !\n", + i, count); + goto fail; + } + + /* Jong 05/30/2006; test */ + memset((unsigned char *)page_addr, 0xFF, + PAGE_SIZE << page_order); + /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */ + + if (page_block == NULL) { + page_block = + (xgi_page_block_t *) + kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL); + if (!page_block) { + XGI_ERROR + ("Can't get memory for page_block! 
\n"); + goto fail; + } + } + + if (prev_page_block == NULL) { + used_block->page_block = page_block; + prev_page_block = page_block; + } else { + prev_page_block->next = page_block; + prev_page_block = page_block; + } + + page_block->next = NULL; + page_block->phys_addr = __pa(page_addr); + page_block->virt_addr = page_addr; + page_block->page_count = count; + page_block->page_order = page_order; + + XGI_INFO + ("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", + page_block->phys_addr); + XGI_INFO + ("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", + page_block->virt_addr); + + page = virt_to_page(page_addr); + + //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p" + // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n", + // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr); + + for (j = 0; j < count; j++, page++, lut_addr++) { + used_block->page_table[index + j].phys_addr = + __pa(page_address(page)); + used_block->page_table[index + j].virt_addr = + (unsigned long)page_address(page); + + XGI_INFO + ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", + used_block->page_table[index + j].phys_addr); + XGI_INFO + ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", + used_block->page_table[index + j].virt_addr); + + *lut_addr = __pa(page_address(page)); + XGI_INC_PAGE_COUNT(page); + XGILockPage(page); + + if (temp) { + XGI_INFO + ("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n", + __pa(page_address(page)), lut_addr, j, + *lut_addr); + temp--; + } + } + + page_block = page_block->next; + page_count -= count; + index += count; + temp = 0; + } + + used_block->owner = owner; + list_add(&used_block->list, &xgi_pcie_heap->used_list); + +#if defined(__i386__) || defined(__x86_64__) + asm volatile ("wbinvd":::"memory"); +#else + mb(); +#endif 
+ + /* Flush GART Table */ + bWriteReg(0xB03F, 0x40); + bWriteReg(0xB03F, 0x00); + + if (owner == PCIE_3D) { + xgi_pcie_vertex_block = used_block; + } + + if (owner == PCIE_3D_CMDLIST) { + xgi_pcie_cmdlist_block = used_block; + } + + if (owner == PCIE_3D_SCRATCHPAD) { + xgi_pcie_scratchpad_block = used_block; + } + + XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n"); + return (used_block); + + fail: + xgi_pcie_block_stuff_free(used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + return NULL; +} + +static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info, + unsigned long offset) +{ + struct list_head *free_list, *used_list; + xgi_pcie_block_t *used_block, *block = NULL; + xgi_pcie_block_t *prev, *next; + unsigned long upper, lower; + + used_list = xgi_pcie_heap->used_list.next; + while (used_list != &xgi_pcie_heap->used_list) { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + if (block->offset == offset) { + break; + } + used_list = used_list->next; + } + + if (used_list == &xgi_pcie_heap->used_list) { + XGI_ERROR("can't find block: 0x%lx to free!\n", offset); + return (NULL); + } + + used_block = block; + XGI_INFO + ("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n", + used_block, used_block->offset, used_block->size, + used_block->bus_addr, used_block->hw_addr); + + xgi_pcie_block_stuff_free(used_block); + + /* update xgi_pcie_heap */ + xgi_pcie_heap->max_freesize += used_block->size; + + prev = next = NULL; + upper = used_block->offset + used_block->size; + lower = used_block->offset; + + free_list = xgi_pcie_heap->free_list.next; + + while (free_list != &xgi_pcie_heap->free_list) { + block = list_entry(free_list, struct xgi_pcie_block_s, list); + if (block->offset == upper) { + next = block; + } else if ((block->offset + block->size) == lower) { + prev = block; + } + free_list = free_list->next; + } + + XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); + list_del(&used_block->list); + + 
if (prev && next) { + prev->size += (used_block->size + next->size); + list_del(&next->list); + XGI_INFO("free node 0x%p\n", next); + kmem_cache_free(xgi_pcie_cache_block, next); + kmem_cache_free(xgi_pcie_cache_block, used_block); + next = NULL; + used_block = NULL; + return (prev); + } + + if (prev) { + prev->size += used_block->size; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + used_block = NULL; + return (prev); + } + + if (next) { + next->size += used_block->size; + next->offset = used_block->offset; + XGI_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_pcie_cache_block, used_block); + used_block = NULL; + return (next); + } + + used_block->bus_addr = 0; + used_block->hw_addr = 0; + used_block->page_count = 0; + used_block->page_order = 0; + list_add(&used_block->list, &xgi_pcie_heap->free_list); + XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); + return (used_block); +} + +void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, + enum PcieOwner owner, xgi_mem_alloc_t * alloc) +{ + xgi_pcie_block_t *block; + xgi_mem_pid_t *mempid_block; + + xgi_down(info->pcie_sem); + block = xgi_pcie_mem_alloc(info, size, owner); + xgi_up(info->pcie_sem); + + if (block == NULL) { + alloc->location = INVALID; + alloc->size = 0; + alloc->bus_addr = 0; + alloc->hw_addr = 0; + XGI_ERROR("PCIE RAM allocation failed\n"); + } else { + XGI_INFO + ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", + block->offset, block->bus_addr); + alloc->location = NON_LOCAL; + alloc->size = block->size; + alloc->bus_addr = block->bus_addr; + alloc->hw_addr = block->hw_addr; + + /* + manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE. + PCIE_3D request means a opengl process created. + PCIE_3D_TEXTURE request means texture cannot alloc from fb. 
+ */ + if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) { + mempid_block = + kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + if (!mempid_block) + XGI_ERROR("mempid_block alloc failed\n"); + mempid_block->location = NON_LOCAL; + if (owner == PCIE_3D) + mempid_block->bus_addr = 0xFFFFFFFF; /*xgi_pcie_vertex_block has the address */ + else + mempid_block->bus_addr = alloc->bus_addr; + mempid_block->pid = alloc->pid; + + XGI_INFO + ("Memory ProcessID add one pcie block pid:%ld successfully! \n", + mempid_block->pid); + list_add(&mempid_block->list, &xgi_mempid_list); + } + } +} + +void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) +{ + xgi_pcie_block_t *block; + unsigned long offset = bus_addr - info->pcie.base; + xgi_mem_pid_t *mempid_block; + xgi_mem_pid_t *mempid_freeblock = NULL; + struct list_head *mempid_list; + char isvertex = 0; + int processcnt; + + if (xgi_pcie_vertex_block + && xgi_pcie_vertex_block->bus_addr == bus_addr) + isvertex = 1; + + if (isvertex) { + /*check is there any other process using vertex */ + processcnt = 0; + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) { + mempid_block = + list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == NON_LOCAL + && mempid_block->bus_addr == 0xFFFFFFFF) { + ++processcnt; + } + mempid_list = mempid_list->next; + } + if (processcnt > 1) { + return; + } + } + + xgi_down(info->pcie_sem); + block = xgi_pcie_mem_free(info, offset); + xgi_up(info->pcie_sem); + + if (block == NULL) { + XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); + } + + if (isvertex) + xgi_pcie_vertex_block = NULL; + + /* manage mempid */ + mempid_list = xgi_mempid_list.next; + while (mempid_list != &xgi_mempid_list) { + mempid_block = + list_entry(mempid_list, struct xgi_mem_pid_s, list); + if (mempid_block->location == NON_LOCAL + && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) + || (!isvertex && mempid_block->bus_addr == bus_addr))) { + mempid_freeblock = 
mempid_block; + break; + } + mempid_list = mempid_list->next; + } + if (mempid_freeblock) { + list_del(&mempid_freeblock->list); + XGI_INFO + ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", + mempid_freeblock->pid); + kfree(mempid_freeblock); + } +} + +/* + * given a bus address, fid the pcie mem block + * uses the bus address as the key. + */ +void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address) +{ + struct list_head *used_list; + xgi_pcie_block_t *block; + int i; + + used_list = xgi_pcie_heap->used_list.next; + + while (used_list != &xgi_pcie_heap->used_list) { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + + if (block->bus_addr == address) { + return block; + } + + if (block->page_table) { + for (i = 0; i < block->page_count; i++) { + unsigned long offset = block->bus_addr; + if ((address >= offset) + && (address < (offset + PAGE_SIZE))) { + return block; + } + } + } + used_list = used_list->next; + } + + XGI_ERROR("could not find map for vm 0x%lx\n", address); + + return NULL; +} + +/* + address -- GE HW address + return -- CPU virtual address + + assume the CPU VAddr is continuous in not the same block +*/ +void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) +{ + struct list_head *used_list; + xgi_pcie_block_t *block; + unsigned long offset_in_page; + unsigned long loc_in_pagetable; + void *ret; + + XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n"); + + used_list = xgi_pcie_heap->used_list.next; + XGI_INFO("Jong_05292006-used_list=%ul\n", used_list); + + offset_in_page = address & (PAGE_SIZE - 1); + XGI_INFO + ("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", + address, PAGE_SIZE - 1, offset_in_page); + + while (used_list != &xgi_pcie_heap->used_list) { + block = list_entry(used_list, struct xgi_pcie_block_s, list); + XGI_INFO("Jong_05292006-block=0x%px\n", block); + XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", + block->hw_addr); + XGI_INFO("Jong_05292006- 
block->size=%ul\n", block->size); + + if ((address >= block->hw_addr) + && (address < (block->hw_addr + block->size))) { + loc_in_pagetable = + (address - block->hw_addr) >> PAGE_SHIFT; + ret = + (void *)(block->page_table[loc_in_pagetable]. + virt_addr + offset_in_page); + + XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT); + XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", + loc_in_pagetable); + XGI_INFO + ("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", + block->page_table[loc_in_pagetable].virt_addr); + XGI_INFO("Jong_05292006-offset_in_page=%d\n", + offset_in_page); + XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", + ret); + + return ret; + } else { + XGI_INFO + ("Jong_05292006-used_list = used_list->next;\n"); + used_list = used_list->next; + } + } + + XGI_ERROR("could not find map for vm 0x%lx\n", address); + return NULL; +} + +void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +{ + +} + +void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +{ +} + +/* + address -- GE hw address +*/ +void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address) +{ + unsigned long *virtaddr = 0; + if (address == 0) { + XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n"); + return; + } + + virtaddr = (unsigned long *)xgi_find_pcie_virt(info, address); + + XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address); + XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr); + XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr); + if (virtaddr != NULL) { + *virtaddr = 0x00f00fff; + } + + XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr); +} diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h index cd5f85b8..32c2b584 100644 --- a/linux-core/xgi_pcie.h +++ b/linux-core/xgi_pcie.h @@ -1,73 +1,73 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_PCIE_H_ -#define _XGI_PCIE_H_ - -#ifndef XGI_PCIE_ALLOC_MAX_ORDER -#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ -#endif - -typedef struct xgi_page_block_s { - struct xgi_page_block_s *next; - unsigned long phys_addr; - unsigned long virt_addr; - unsigned long page_count; - unsigned long page_order; -} xgi_page_block_t; - -typedef struct xgi_pcie_block_s { - struct list_head list; - unsigned long offset; /* block's offset in pcie memory, begin from 0 */ - unsigned long size; /* The block size. 
*/ - unsigned long bus_addr; /* CPU access address/bus address */ - unsigned long hw_addr; /* GE access address */ - - unsigned long page_count; - unsigned long page_order; - xgi_page_block_t *page_block; - xgi_pte_t *page_table; /* list of physical pages allocated */ - - atomic_t use_count; - enum PcieOwner owner; - unsigned long processID; -} xgi_pcie_block_t; - -typedef struct xgi_pcie_list_s { - xgi_pcie_block_t *head; - xgi_pcie_block_t *tail; -} xgi_pcie_list_t; - -typedef struct xgi_pcie_heap_s { - struct list_head free_list; - struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; -} xgi_pcie_heap_t; - -#endif + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#ifndef _XGI_PCIE_H_ +#define _XGI_PCIE_H_ + +#ifndef XGI_PCIE_ALLOC_MAX_ORDER +#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ +#endif + +typedef struct xgi_page_block_s { + struct xgi_page_block_s *next; + unsigned long phys_addr; + unsigned long virt_addr; + unsigned long page_count; + unsigned long page_order; +} xgi_page_block_t; + +typedef struct xgi_pcie_block_s { + struct list_head list; + unsigned long offset; /* block's offset in pcie memory, begin from 0 */ + unsigned long size; /* The block size. */ + unsigned long bus_addr; /* CPU access address/bus address */ + unsigned long hw_addr; /* GE access address */ + + unsigned long page_count; + unsigned long page_order; + xgi_page_block_t *page_block; + xgi_pte_t *page_table; /* list of physical pages allocated */ + + atomic_t use_count; + enum PcieOwner owner; + unsigned long processID; +} xgi_pcie_block_t; + +typedef struct xgi_pcie_list_s { + xgi_pcie_block_t *head; + xgi_pcie_block_t *tail; +} xgi_pcie_list_t; + +typedef struct xgi_pcie_heap_s { + struct list_head free_list; + struct list_head used_list; + struct list_head sort_list; + unsigned long max_freesize; +} xgi_pcie_heap_t; + +#endif diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 18448139..487a7e15 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -1,410 +1,404 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - - -#ifndef _XGI_REGS_H_ -#define _XGI_REGS_H_ - -#ifndef XGI_MMIO - #define XGI_MMIO 1 -#endif - -#if XGI_MMIO -#define OUTB(port, value) writeb(value, info->mmio.vbase + port) -#define INB(port) readb(info->mmio.vbase + port) -#define OUTW(port, value) writew(value, info->mmio.vbase + port) -#define INW(port) readw(info->mmio.vbase + port) -#define OUTDW(port, value) writel(value, info->mmio.vbase + port) -#define INDW(port) readl(info->mmio.vbase + port) -#else -#define OUTB(port, value) outb(value, port) -#define INB(port) inb(port) -#define OUTW(port, value) outw(value, port) -#define INW(port) inw(port) -#define OUTDW(port, value) outl(value, port) -#define INDW(port) inl(port) -#endif - -/* Hardware access functions */ -static inline void OUT3C5B(xgi_info_t *info, u8 index, u8 data) -{ - OUTB(0x3C4, index); - OUTB(0x3C5, data); -} - -static inline void OUT3X5B(xgi_info_t *info, u8 index, u8 data) -{ - OUTB(0x3D4, index); - OUTB(0x3D5, data); -} - -static inline void OUT3CFB(xgi_info_t *info, u8 index, u8 data) -{ - OUTB(0x3CE, index); - OUTB(0x3CF, data); -} - -static inline u8 IN3C5B(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3C4, index); - data = INB(0x3C5); - return data; -} - -static inline u8 IN3X5B(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3D4, index); - data = INB(0x3D5); - return data; -} - -static inline u8 IN3CFB(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3CE, index); - data = INB(0x3CF); - return data; -} - -static inline void OUT3C5W(xgi_info_t *info, u8 index, u16 data) -{ - OUTB(0x3C4, index); - OUTB(0x3C5, data); -} - -static inline void OUT3X5W(xgi_info_t *info, u8 index, u16 data) -{ - OUTB(0x3D4, index); - OUTB(0x3D5, data); -} - -static inline void OUT3CFW(xgi_info_t *info, u8 index, u8 data) -{ - OUTB(0x3CE, index); - OUTB(0x3CF, data); -} - -static inline u8 IN3C5W(xgi_info_t *info, u8 index) -{ - 
volatile u8 data=0; - OUTB(0x3C4, index); - data = INB(0x3C5); - return data; -} - -static inline u8 IN3X5W(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3D4, index); - data = INB(0x3D5); - return data; -} - -static inline u8 IN3CFW(xgi_info_t *info, u8 index) -{ - volatile u8 data=0; - OUTB(0x3CE, index); - data = INB(0x3CF); - return data; -} - -static inline u8 readAttr(xgi_info_t *info, u8 index) -{ - INB(0x3DA); /* flip-flop to index */ - OUTB(0x3C0, index); - return INB(0x3C1); -} - -static inline void writeAttr(xgi_info_t *info, u8 index, u8 value) -{ - INB(0x3DA); /* flip-flop to index */ - OUTB(0x3C0, index); - OUTB(0x3C0, value); -} - -/* - * Graphic engine register (2d/3d) acessing interface - */ -static inline void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data) -{ - /* Jong 05/25/2006 */ - XGI_INFO("Jong-WriteRegDWord()-Begin \n"); - XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", info->mmio.vbase); - XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr); - XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); - /* return; */ - - *(volatile u32*)(info->mmio.vbase + addr) = (data); - XGI_INFO("Jong-WriteRegDWord()-End \n"); -} - -static inline void WriteRegWord(xgi_info_t *info, u32 addr, u16 data) -{ - *(volatile u16*)(info->mmio.vbase + addr) = (data); -} - -static inline void WriteRegByte(xgi_info_t *info, u32 addr, u8 data) -{ - *(volatile u8*)(info->mmio.vbase + addr) = (data); -} - -static inline u32 ReadRegDWord(xgi_info_t *info, u32 addr) -{ - volatile u32 data; - data = *(volatile u32*)(info->mmio.vbase + addr); - return data; -} - -static inline u16 ReadRegWord(xgi_info_t *info, u32 addr) -{ - volatile u16 data; - data = *(volatile u16*)(info->mmio.vbase + addr); - return data; -} - -static inline u8 ReadRegByte(xgi_info_t *info, u32 addr) -{ - volatile u8 data; - data = *(volatile u8*)(info->mmio.vbase + addr); - return data; -} -#if 0 -extern void OUT3C5B(xgi_info_t *info, u8 index, u8 data); -extern void 
OUT3X5B(xgi_info_t *info, u8 index, u8 data); -extern void OUT3CFB(xgi_info_t *info, u8 index, u8 data); -extern u8 IN3C5B(xgi_info_t *info, u8 index); -extern u8 IN3X5B(xgi_info_t *info, u8 index); -extern u8 IN3CFB(xgi_info_t *info, u8 index); -extern void OUT3C5W(xgi_info_t *info, u8 index, u8 data); -extern void OUT3X5W(xgi_info_t *info, u8 index, u8 data); -extern void OUT3CFW(xgi_info_t *info, u8 index, u8 data); -extern u8 IN3C5W(xgi_info_t *info, u8 index); -extern u8 IN3X5W(xgi_info_t *info, u8 index); -extern u8 IN3CFW(xgi_info_t *info, u8 index); - -extern void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data); -extern void WriteRegWord(xgi_info_t *info, u32 addr, u16 data); -extern void WriteRegByte(xgi_info_t *info, u32 addr, u8 data); -extern u32 ReadRegDWord(xgi_info_t *info, u32 addr); -extern u16 ReadRegWord(xgi_info_t *info, u32 addr); -extern u8 ReadRegByte(xgi_info_t *info, u32 addr); - -extern void EnableProtect(); -extern void DisableProtect(); -#endif - -#define Out(port, data) OUTB(port, data) -#define bOut(port, data) OUTB(port, data) -#define wOut(port, data) OUTW(port, data) -#define dwOut(port, data) OUTDW(port, data) - -#define Out3x5(index, data) OUT3X5B(info, index, data) -#define bOut3x5(index, data) OUT3X5B(info, index, data) -#define wOut3x5(index, data) OUT3X5W(info, index, data) - -#define Out3c5(index, data) OUT3C5B(info, index, data) -#define bOut3c5(index, data) OUT3C5B(info, index, data) -#define wOut3c5(index, data) OUT3C5W(info, index, data) - -#define Out3cf(index, data) OUT3CFB(info, index, data) -#define bOut3cf(index, data) OUT3CFB(info, index, data) -#define wOut3cf(index, data) OUT3CFW(info, index, data) - -#define In(port) INB(port) -#define bIn(port) INB(port) -#define wIn(port) INW(port) -#define dwIn(port) INDW(port) - -#define In3x5(index) IN3X5B(info, index) -#define bIn3x5(index) IN3X5B(info, index) -#define wIn3x5(index) IN3X5W(info, index) - -#define In3c5(index) IN3C5B(info, index) -#define bIn3c5(index) 
IN3C5B(info, index) -#define wIn3c5(index) IN3C5W(info, index) - -#define In3cf(index) IN3CFB(info, index) -#define bIn3cf(index) IN3CFB(info, index) -#define wIn3cf(index) IN3CFW(info, index) - -#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data) -#define wWriteReg(addr, data) WriteRegWord(info, addr, data) -#define bWriteReg(addr, data) WriteRegByte(info, addr, data) -#define dwReadReg(addr) ReadRegDWord(info, addr) -#define wReadReg(addr) ReadRegWord(info, addr) -#define bReadReg(addr) ReadRegByte(info, addr) - -static inline void xgi_protect_all(xgi_info_t *info) -{ - OUTB(0x3C4, 0x11); - OUTB(0x3C5, 0x92); -} - -static inline void xgi_unprotect_all(xgi_info_t *info) -{ - OUTB(0x3C4, 0x11); - OUTB(0x3C5, 0x92); -} - -static inline void xgi_enable_mmio(xgi_info_t *info) -{ - u8 protect = 0; - - /* Unprotect registers */ - outb(0x11, 0x3C4); - protect = inb(0x3C5); - outb(0x92, 0x3C5); - - outb(0x3A, 0x3D4); - outb(inb(0x3D5) | 0x20, 0x3D5); - - /* Enable MMIO */ - outb(0x39, 0x3D4); - outb(inb(0x3D5) | 0x01, 0x3D5); - - OUTB(0x3C4, 0x11); - OUTB(0x3C5, protect); -} - -static inline void xgi_disable_mmio(xgi_info_t *info) -{ - u8 protect = 0; - - /* unprotect registers */ - OUTB(0x3C4, 0x11); - protect = INB(0x3C5); - OUTB(0x3C5, 0x92); - - /* Disable MMIO access */ - OUTB(0x3D4, 0x39); - OUTB(0x3D5, INB(0x3D5) & 0xFE); - - /* Protect registers */ - outb(0x11, 0x3C4); - outb(protect, 0x3C5); -} - -static inline void xgi_enable_ge(xgi_info_t *info) -{ - unsigned char bOld3cf2a = 0; - int wait = 0; - - // Enable GE - OUTW(0x3C4, 0x9211); - - // Save and close dynamic gating - bOld3cf2a = bIn3cf(0x2a); - bOut3cf(0x2a, bOld3cf2a & 0xfe); - - // Reset both 3D and 2D engine - bOut3x5(0x36, 0x84); - wait = 10; - while (wait--) - { - bIn(0x36); - } - bOut3x5(0x36, 0x94); - wait = 10; - while (wait--) - { - bIn(0x36); - } - bOut3x5(0x36, 0x84); - wait = 10; - while (wait--) - { - bIn(0x36); - } - // Enable 2D engine only - bOut3x5(0x36, 0x80); - - // Enable 
2D+3D engine - bOut3x5(0x36, 0x84); - - // Restore dynamic gating - bOut3cf(0x2a, bOld3cf2a); -} - -static inline void xgi_disable_ge(xgi_info_t *info) -{ - int wait = 0; - - // Reset both 3D and 2D engine - bOut3x5(0x36, 0x84); - - wait = 10; - while (wait--) - { - bIn(0x36); - } - bOut3x5(0x36, 0x94); - - wait = 10; - while (wait--) - { - bIn(0x36); - } - bOut3x5(0x36, 0x84); - - wait = 10; - while (wait--) - { - bIn(0x36); - } - - // Disable 2D engine only - bOut3x5(0x36, 0); -} - -static inline void xgi_enable_dvi_interrupt(xgi_info_t *info) -{ - Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 - Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 - Out3cf(0x39, In3cf(0x39) | 0x02); -} -static inline void xgi_disable_dvi_interrupt(xgi_info_t *info) -{ - Out3cf(0x39,In3cf(0x39) & ~0x02); -} - -static inline void xgi_enable_crt1_interrupt(xgi_info_t *info) -{ - Out3cf(0x3d,In3cf(0x3d) | 0x04); - Out3cf(0x3d,In3cf(0x3d) & ~0x04); - Out3cf(0x3d,In3cf(0x3d) | 0x08); -} - -static inline void xgi_disable_crt1_interrupt(xgi_info_t *info) -{ - Out3cf(0x3d,In3cf(0x3d) & ~0x08); -} - -#endif - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. * + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_REGS_H_ +#define _XGI_REGS_H_ + +#ifndef XGI_MMIO +#define XGI_MMIO 1 +#endif + +#if XGI_MMIO +#define OUTB(port, value) writeb(value, info->mmio.vbase + port) +#define INB(port) readb(info->mmio.vbase + port) +#define OUTW(port, value) writew(value, info->mmio.vbase + port) +#define INW(port) readw(info->mmio.vbase + port) +#define OUTDW(port, value) writel(value, info->mmio.vbase + port) +#define INDW(port) readl(info->mmio.vbase + port) +#else +#define OUTB(port, value) outb(value, port) +#define INB(port) inb(port) +#define OUTW(port, value) outw(value, port) +#define INW(port) inw(port) +#define OUTDW(port, value) outl(value, port) +#define INDW(port) inl(port) +#endif + +/* Hardware access functions */ +static inline void OUT3C5B(xgi_info_t * info, u8 index, u8 data) +{ + OUTB(0x3C4, index); + OUTB(0x3C5, data); +} + +static inline void OUT3X5B(xgi_info_t * info, u8 index, u8 data) +{ + OUTB(0x3D4, index); + OUTB(0x3D5, data); +} + +static inline void OUT3CFB(xgi_info_t * info, u8 index, u8 data) +{ + OUTB(0x3CE, index); + OUTB(0x3CF, data); +} + +static inline u8 IN3C5B(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3C4, index); + data = INB(0x3C5); + return data; +} + +static inline u8 IN3X5B(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3D4, index); + data = INB(0x3D5); + return data; +} + +static inline u8 IN3CFB(xgi_info_t * info, u8 index) 
+{ + volatile u8 data = 0; + OUTB(0x3CE, index); + data = INB(0x3CF); + return data; +} + +static inline void OUT3C5W(xgi_info_t * info, u8 index, u16 data) +{ + OUTB(0x3C4, index); + OUTB(0x3C5, data); +} + +static inline void OUT3X5W(xgi_info_t * info, u8 index, u16 data) +{ + OUTB(0x3D4, index); + OUTB(0x3D5, data); +} + +static inline void OUT3CFW(xgi_info_t * info, u8 index, u8 data) +{ + OUTB(0x3CE, index); + OUTB(0x3CF, data); +} + +static inline u8 IN3C5W(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3C4, index); + data = INB(0x3C5); + return data; +} + +static inline u8 IN3X5W(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3D4, index); + data = INB(0x3D5); + return data; +} + +static inline u8 IN3CFW(xgi_info_t * info, u8 index) +{ + volatile u8 data = 0; + OUTB(0x3CE, index); + data = INB(0x3CF); + return data; +} + +static inline u8 readAttr(xgi_info_t * info, u8 index) +{ + INB(0x3DA); /* flip-flop to index */ + OUTB(0x3C0, index); + return INB(0x3C1); +} + +static inline void writeAttr(xgi_info_t * info, u8 index, u8 value) +{ + INB(0x3DA); /* flip-flop to index */ + OUTB(0x3C0, index); + OUTB(0x3C0, value); +} + +/* + * Graphic engine register (2d/3d) acessing interface + */ +static inline void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data) +{ + /* Jong 05/25/2006 */ + XGI_INFO("Jong-WriteRegDWord()-Begin \n"); + XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", + info->mmio.vbase); + XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr); + XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); + /* return; */ + + *(volatile u32 *)(info->mmio.vbase + addr) = (data); + XGI_INFO("Jong-WriteRegDWord()-End \n"); +} + +static inline void WriteRegWord(xgi_info_t * info, u32 addr, u16 data) +{ + *(volatile u16 *)(info->mmio.vbase + addr) = (data); +} + +static inline void WriteRegByte(xgi_info_t * info, u32 addr, u8 data) +{ + *(volatile u8 *)(info->mmio.vbase + addr) = (data); +} + +static inline u32 
ReadRegDWord(xgi_info_t * info, u32 addr) +{ + volatile u32 data; + data = *(volatile u32 *)(info->mmio.vbase + addr); + return data; +} + +static inline u16 ReadRegWord(xgi_info_t * info, u32 addr) +{ + volatile u16 data; + data = *(volatile u16 *)(info->mmio.vbase + addr); + return data; +} + +static inline u8 ReadRegByte(xgi_info_t * info, u32 addr) +{ + volatile u8 data; + data = *(volatile u8 *)(info->mmio.vbase + addr); + return data; +} + +#if 0 +extern void OUT3C5B(xgi_info_t * info, u8 index, u8 data); +extern void OUT3X5B(xgi_info_t * info, u8 index, u8 data); +extern void OUT3CFB(xgi_info_t * info, u8 index, u8 data); +extern u8 IN3C5B(xgi_info_t * info, u8 index); +extern u8 IN3X5B(xgi_info_t * info, u8 index); +extern u8 IN3CFB(xgi_info_t * info, u8 index); +extern void OUT3C5W(xgi_info_t * info, u8 index, u8 data); +extern void OUT3X5W(xgi_info_t * info, u8 index, u8 data); +extern void OUT3CFW(xgi_info_t * info, u8 index, u8 data); +extern u8 IN3C5W(xgi_info_t * info, u8 index); +extern u8 IN3X5W(xgi_info_t * info, u8 index); +extern u8 IN3CFW(xgi_info_t * info, u8 index); + +extern void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data); +extern void WriteRegWord(xgi_info_t * info, u32 addr, u16 data); +extern void WriteRegByte(xgi_info_t * info, u32 addr, u8 data); +extern u32 ReadRegDWord(xgi_info_t * info, u32 addr); +extern u16 ReadRegWord(xgi_info_t * info, u32 addr); +extern u8 ReadRegByte(xgi_info_t * info, u32 addr); + +extern void EnableProtect(); +extern void DisableProtect(); +#endif + +#define Out(port, data) OUTB(port, data) +#define bOut(port, data) OUTB(port, data) +#define wOut(port, data) OUTW(port, data) +#define dwOut(port, data) OUTDW(port, data) + +#define Out3x5(index, data) OUT3X5B(info, index, data) +#define bOut3x5(index, data) OUT3X5B(info, index, data) +#define wOut3x5(index, data) OUT3X5W(info, index, data) + +#define Out3c5(index, data) OUT3C5B(info, index, data) +#define bOut3c5(index, data) OUT3C5B(info, index, data) 
+#define wOut3c5(index, data) OUT3C5W(info, index, data) + +#define Out3cf(index, data) OUT3CFB(info, index, data) +#define bOut3cf(index, data) OUT3CFB(info, index, data) +#define wOut3cf(index, data) OUT3CFW(info, index, data) + +#define In(port) INB(port) +#define bIn(port) INB(port) +#define wIn(port) INW(port) +#define dwIn(port) INDW(port) + +#define In3x5(index) IN3X5B(info, index) +#define bIn3x5(index) IN3X5B(info, index) +#define wIn3x5(index) IN3X5W(info, index) + +#define In3c5(index) IN3C5B(info, index) +#define bIn3c5(index) IN3C5B(info, index) +#define wIn3c5(index) IN3C5W(info, index) + +#define In3cf(index) IN3CFB(info, index) +#define bIn3cf(index) IN3CFB(info, index) +#define wIn3cf(index) IN3CFW(info, index) + +#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data) +#define wWriteReg(addr, data) WriteRegWord(info, addr, data) +#define bWriteReg(addr, data) WriteRegByte(info, addr, data) +#define dwReadReg(addr) ReadRegDWord(info, addr) +#define wReadReg(addr) ReadRegWord(info, addr) +#define bReadReg(addr) ReadRegByte(info, addr) + +static inline void xgi_protect_all(xgi_info_t * info) +{ + OUTB(0x3C4, 0x11); + OUTB(0x3C5, 0x92); +} + +static inline void xgi_unprotect_all(xgi_info_t * info) +{ + OUTB(0x3C4, 0x11); + OUTB(0x3C5, 0x92); +} + +static inline void xgi_enable_mmio(xgi_info_t * info) +{ + u8 protect = 0; + + /* Unprotect registers */ + outb(0x11, 0x3C4); + protect = inb(0x3C5); + outb(0x92, 0x3C5); + + outb(0x3A, 0x3D4); + outb(inb(0x3D5) | 0x20, 0x3D5); + + /* Enable MMIO */ + outb(0x39, 0x3D4); + outb(inb(0x3D5) | 0x01, 0x3D5); + + OUTB(0x3C4, 0x11); + OUTB(0x3C5, protect); +} + +static inline void xgi_disable_mmio(xgi_info_t * info) +{ + u8 protect = 0; + + /* unprotect registers */ + OUTB(0x3C4, 0x11); + protect = INB(0x3C5); + OUTB(0x3C5, 0x92); + + /* Disable MMIO access */ + OUTB(0x3D4, 0x39); + OUTB(0x3D5, INB(0x3D5) & 0xFE); + + /* Protect registers */ + outb(0x11, 0x3C4); + outb(protect, 0x3C5); +} + +static inline 
void xgi_enable_ge(xgi_info_t * info) +{ + unsigned char bOld3cf2a = 0; + int wait = 0; + + // Enable GE + OUTW(0x3C4, 0x9211); + + // Save and close dynamic gating + bOld3cf2a = bIn3cf(0x2a); + bOut3cf(0x2a, bOld3cf2a & 0xfe); + + // Reset both 3D and 2D engine + bOut3x5(0x36, 0x84); + wait = 10; + while (wait--) { + bIn(0x36); + } + bOut3x5(0x36, 0x94); + wait = 10; + while (wait--) { + bIn(0x36); + } + bOut3x5(0x36, 0x84); + wait = 10; + while (wait--) { + bIn(0x36); + } + // Enable 2D engine only + bOut3x5(0x36, 0x80); + + // Enable 2D+3D engine + bOut3x5(0x36, 0x84); + + // Restore dynamic gating + bOut3cf(0x2a, bOld3cf2a); +} + +static inline void xgi_disable_ge(xgi_info_t * info) +{ + int wait = 0; + + // Reset both 3D and 2D engine + bOut3x5(0x36, 0x84); + + wait = 10; + while (wait--) { + bIn(0x36); + } + bOut3x5(0x36, 0x94); + + wait = 10; + while (wait--) { + bIn(0x36); + } + bOut3x5(0x36, 0x84); + + wait = 10; + while (wait--) { + bIn(0x36); + } + + // Disable 2D engine only + bOut3x5(0x36, 0); +} + +static inline void xgi_enable_dvi_interrupt(xgi_info_t * info) +{ + Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 + Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 + Out3cf(0x39, In3cf(0x39) | 0x02); +} +static inline void xgi_disable_dvi_interrupt(xgi_info_t * info) +{ + Out3cf(0x39, In3cf(0x39) & ~0x02); +} + +static inline void xgi_enable_crt1_interrupt(xgi_info_t * info) +{ + Out3cf(0x3d, In3cf(0x3d) | 0x04); + Out3cf(0x3d, In3cf(0x3d) & ~0x04); + Out3cf(0x3d, In3cf(0x3d) | 0x08); +} + +static inline void xgi_disable_crt1_interrupt(xgi_info_t * info) +{ + Out3cf(0x3d, In3cf(0x3d) & ~0x08); +} + +#endif diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 24cb8f3c..65ec498b 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -1,68 +1,67 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. 
- * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - -#ifndef _XGI_TYPES_H_ -#define _XGI_TYPES_H_ - -/**************************************************************************** - * Typedefs * - ***************************************************************************/ - -typedef unsigned char V8; /* "void": enumerated or multiple fields */ -typedef unsigned short V16; /* "void": enumerated or multiple fields */ -typedef unsigned char U8; /* 0 to 255 */ -typedef unsigned short U16; /* 0 to 65535 */ -typedef signed char S8; /* -128 to 127 */ -typedef signed short S16; /* -32768 to 32767 */ -typedef float F32; /* IEEE Single Precision (S1E8M23) */ -typedef double F64; /* IEEE Double Precision (S1E11M52) */ -typedef unsigned long BOOL; -/* - * mainly for 64-bit linux, where long is 64 bits - * and win9x, where int is 16 bit. - */ -#if defined(vxworks) -typedef unsigned int V32; /* "void": enumerated or multiple fields */ -typedef unsigned int U32; /* 0 to 4294967295 */ -typedef signed int S32; /* -2147483648 to 2147483647 */ -#else -typedef unsigned long V32; /* "void": enumerated or multiple fields */ -typedef unsigned long U32; /* 0 to 4294967295 */ -typedef signed long S32; /* -2147483648 to 2147483647 */ -#endif - -#ifndef TRUE -#define TRUE 1UL -#endif - -#ifndef FALSE -#define FALSE 0UL -#endif - -#endif - + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * * + * All Rights Reserved. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#ifndef _XGI_TYPES_H_ +#define _XGI_TYPES_H_ + +/**************************************************************************** + * Typedefs * + ***************************************************************************/ + +typedef unsigned char V8; /* "void": enumerated or multiple fields */ +typedef unsigned short V16; /* "void": enumerated or multiple fields */ +typedef unsigned char U8; /* 0 to 255 */ +typedef unsigned short U16; /* 0 to 65535 */ +typedef signed char S8; /* -128 to 127 */ +typedef signed short S16; /* -32768 to 32767 */ +typedef float F32; /* IEEE Single Precision (S1E8M23) */ +typedef double F64; /* IEEE Double Precision (S1E11M52) */ +typedef unsigned long BOOL; +/* + * mainly for 64-bit linux, where long is 64 bits + * and win9x, where int is 16 bit. + */ +#if defined(vxworks) +typedef unsigned int V32; /* "void": enumerated or multiple fields */ +typedef unsigned int U32; /* 0 to 4294967295 */ +typedef signed int S32; /* -2147483648 to 2147483647 */ +#else +typedef unsigned long V32; /* "void": enumerated or multiple fields */ +typedef unsigned long U32; /* 0 to 4294967295 */ +typedef signed long S32; /* -2147483648 to 2147483647 */ +#endif + +#ifndef TRUE +#define TRUE 1UL +#endif + +#ifndef FALSE +#define FALSE 0UL +#endif + +#endif -- cgit v1.2.3 From ec9e494eb99d409a7e1e97bb6c5f71e9bb5a4486 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:15:22 -0700 Subject: Gut support for pre-2.6 kernels. 
--- linux-core/xgi_linux.h | 96 +++----------------------------------------------- 1 file changed, 4 insertions(+), 92 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 67c1af82..77660ee0 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -35,20 +35,8 @@ #include #endif -#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */ -#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) -# error "This driver does not support pre-2.4 kernels!" -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) -#define KERNEL_2_4 -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) -# error "This driver does not support 2.5 kernels!" -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0) -#define KERNEL_2_6 -#else -# error "This driver does not support development kernels!" +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) +# error "This driver does not support pre-2.6 kernels!" 
#endif #if defined (CONFIG_SMP) && !defined (__SMP__) @@ -59,10 +47,6 @@ #define MODVERSIONS #endif -#if defined (MODVERSIONS) && !defined (KERNEL_2_6) -#include -#endif - #include /* printk */ #include @@ -88,12 +72,10 @@ #define XGI_SCAN_PROCESS(p) for_each_task(p) #endif -#ifdef KERNEL_2_6 #include /* module_param() */ #include /* kernel_locked */ #include /* flush_tlb(), flush_tlb_all() */ #include /* page table entry lookup */ -#endif #include /* pci_find_class, etc */ #include /* tasklets, interrupt helpers */ @@ -141,13 +123,8 @@ #endif #ifndef MAX_ORDER -#ifdef KERNEL_2_4 -#define MAX_ORDER 10 -#endif -#ifdef KERNEL_2_6 #define MAX_ORDER 11 #endif -#endif #ifndef module_init #define module_init(x) int init_module(void) { return x(); } @@ -171,39 +148,20 @@ typedef void irqreturn_t; pos = pos->next, prefetch(pos->next)) #endif -#ifdef KERNEL_2_4 -#define XGI_PCI_FOR_EACH_DEV(dev) pci_for_each_dev(dev) -#endif -#ifdef KERNEL_2_6 extern struct list_head pci_devices; /* list of all devices */ #define XGI_PCI_FOR_EACH_DEV(dev) \ for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) -#endif /* * the following macro causes problems when used in the same module * as module_param(); undef it so we don't accidentally mix the two */ -#if defined (KERNEL_2_6) #undef MODULE_PARM -#endif #ifdef EXPORT_NO_SYMBOLS EXPORT_NO_SYMBOLS; #endif -#if defined (KERNEL_2_4) -#define XGI_IS_SUSER() suser() -#define XGI_PCI_DEVICE_NAME(dev) ((dev)->name) -#define XGI_NUM_CPUS() smp_num_cpus -#define XGI_CLI() __cli() -#define XGI_SAVE_FLAGS(eflags) __save_flags(eflags) -#define XGI_RESTORE_FLAGS(eflags) __restore_flags(eflags) -#define XGI_MAY_SLEEP() (!in_interrupt()) -#define XGI_MODULE_PARAMETER(x) MODULE_PARM(x, "i") -#endif - -#if defined (KERNEL_2_6) #define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) #define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) #define XGI_NUM_CPUS() num_online_cpus() @@ -212,7 +170,7 @@ 
EXPORT_NO_SYMBOLS; #define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) #define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) #define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) -#endif + /* Earlier 2.4.x kernels don't have pci_disable_device() */ #ifdef XGI_PCI_DISABLE_DEVICE_PRESENT @@ -255,7 +213,7 @@ EXPORT_NO_SYMBOLS; * model is not sufficient for full acpi support. it may work in some cases, * but not enough for us to officially support this configuration. */ -#if defined(CONFIG_ACPI) && defined(KERNEL_2_6) +#if defined(CONFIG_ACPI) #define XGI_PM_SUPPORT_ACPI #endif @@ -264,7 +222,6 @@ EXPORT_NO_SYMBOLS; #endif #if defined(CONFIG_DEVFS_FS) -#if defined(KERNEL_2_6) typedef void *devfs_handle_t; #define XGI_DEVFS_REGISTER(_name, _minor) \ ({ \ @@ -281,39 +238,10 @@ typedef void *devfs_handle_t; */ #define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") #define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") -#else // defined(KERNEL_2_4) -#define XGI_DEVFS_REGISTER(_name, _minor) \ - ({ \ - devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \ - XGI_DEV_MAJOR, _minor, \ - S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \ - __handle; \ - }) - -#define XGI_DEVFS_REMOVE_DEVICE(i) \ - ({ \ - if (xgi_devfs_handles[i] != NULL) \ - { \ - devfs_unregister(xgi_devfs_handles[i]); \ - } \ - }) -#define XGI_DEVFS_REMOVE_CONTROL() \ - ({ \ - if (xgi_devfs_handles[0] != NULL) \ - { \ - devfs_unregister(xgi_devfs_handles[0]); \ - } \ - }) -#endif /* defined(KERNEL_2_4) */ #endif /* defined(CONFIG_DEVFS_FS) */ -#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6) -#define XGI_REGISTER_CHRDEV(x...) devfs_register_chrdev(x) -#define XGI_UNREGISTER_CHRDEV(x...) devfs_unregister_chrdev(x) -#else #define XGI_REGISTER_CHRDEV(x...) register_chrdev(x) #define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) -#endif #if defined(XGI_REMAP_PFN_RANGE_PRESENT) #define XGI_REMAP_PAGE_RANGE(from, offset, x...) 
\ @@ -519,17 +447,6 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) #define XGI_SET_PAGE_ATTRIB_CACHED(page_list) #endif -#ifdef KERNEL_2_4 -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) - -#define XGILockPage(page) set_bit(PG_locked, &(page)->flags) -#define XGIUnlockPage(page) clear_bit(PG_locked, &(page)->flags) -#endif - -#ifdef KERNEL_2_6 /* add for SUSE 9, Jill*/ #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) #define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) @@ -544,7 +461,6 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) #endif #define XGILockPage(page) SetPageLocked(page) #define XGIUnlockPage(page) ClearPageLocked(page) -#endif /* * hide a pointer to struct xgi_info_t in a file-private info @@ -564,11 +480,7 @@ typedef struct { /* for the card devices */ #define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) -#ifdef KERNEL_2_0 -#define INODE_FROM_FP(filp) ((filp)->f_inode) -#else #define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) -#endif #define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) #define XGI_ATOMIC_INC(data) atomic_inc(&(data)) -- cgit v1.2.3 From 7a053306a9f8152462fda521e1a8322ac2bdf9fd Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:16:04 -0700 Subject: linux/config.h is deprecated or gone. 
--- linux-core/xgi_linux.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 77660ee0..28349470 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -29,8 +29,6 @@ #ifndef _XGI_LINUX_H_ #define _XGI_LINUX_H_ -#include - #ifndef LINUX_VERSION_CODE #include #endif -- cgit v1.2.3 From 47bf6239aaefb977cc17e421af273c3278eb127c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:20:15 -0700 Subject: Clean up compile-time kernel feature detection. --- linux-core/xgi_linux.h | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 28349470..8cf304c7 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -37,6 +37,12 @@ # error "This driver does not support pre-2.6 kernels!" #endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10) +# define XGI_REMAP_PFN_RANGE_PRESENT +#else +# define XGI_REMAP_PAGE_RANGE_5 +#endif + #if defined (CONFIG_SMP) && !defined (__SMP__) #define __SMP__ #endif @@ -170,12 +176,7 @@ EXPORT_NO_SYMBOLS; #define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) -/* Earlier 2.4.x kernels don't have pci_disable_device() */ -#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT #define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) -#else -#define XGI_PCI_DISABLE_DEVICE(dev) -#endif /* common defines */ #define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) @@ -195,15 +196,9 @@ EXPORT_NO_SYMBOLS; #define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) #ifdef XGI_PCI_GET_CLASS_PRESENT -#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) #define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) -#define XGI_PCI_GET_SLOT(bus,devfn) pci_get_slot(pci_find_bus(0,bus),devfn) -#define XGI_PCI_GET_CLASS(class,from) pci_get_class(class,from) #else -#define XGI_PCI_DEV_PUT(dev) #define 
XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) -#define XGI_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn) -#define XGI_PCI_GET_CLASS(class,from) pci_find_class(class,from) #endif /* @@ -429,7 +424,6 @@ typedef struct xgi_pte_s { * 2.4.20 is the first kernel to address it properly. The * page_attr API provides the means to solve the problem. */ -#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT) static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t * page_ptr) { struct page *page = virt_to_page(__va(page_ptr->phys_addr)); @@ -440,10 +434,6 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) struct page *page = virt_to_page(__va(page_ptr->phys_addr)); change_page_attr(page, 1, PAGE_KERNEL); } -#else -#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list) -#define XGI_SET_PAGE_ATTRIB_CACHED(page_list) -#endif /* add for SUSE 9, Jill*/ #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) -- cgit v1.2.3 From 3a776fa01e61c1dc40a0a1803a80c98bf7e77164 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:26:10 -0700 Subject: Add XGI driver to Makefiles. 
--- linux-core/Makefile | 9 ++++++++- linux-core/Makefile.kernel | 2 ++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/Makefile b/linux-core/Makefile index 1758777c..2052459d 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -58,7 +58,7 @@ endif # Modules for all architectures MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \ - mach64.o nv.o nouveau.o + mach64.o nv.o nouveau.o xgi.o # Modules only for ix86 architectures ifneq (,$(findstring 86,$(MACHINE))) @@ -91,6 +91,8 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS) NVHEADERS = nv_drv.h $(DRMHEADERS) FFBHEADERS = ffb_drv.h $(DRMHEADERS) NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS) +XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_fb.h xgi_linux.h xgi_misc.h \ + xgi_pcie.h xgi_regs.h xgi_types.h PROGS = dristat drmstat @@ -284,6 +286,7 @@ CONFIG_DRM_VIA := n CONFIG_DRM_MACH64 := n CONFIG_DRM_NV := n CONFIG_DRM_NOUVEAU := n +CONFIG_DRM_XGI := n # Enable module builds for the modules requested/supported. 
@@ -320,6 +323,9 @@ endif ifneq (,$(findstring nouveau,$(DRM_MODULES))) CONFIG_DRM_NOUVEAU := m endif +ifneq (,$(findstring xgi,$(DRM_MODULES))) +CONFIG_DRM_XGI := m +endif # These require AGP support @@ -347,6 +353,7 @@ $(via-objs): $(VIAHEADERS) $(mach64-objs): $(MACH64HEADERS) $(nv-objs): $(NVHEADERS) $(nouveau-objs): $(NOUVEAUHEADERS) +$(xgi-objs): $(XGIHEADERS) endif diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 6f5b021b..d9865f5a 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -35,6 +35,7 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \ via_video.o via_dmablit.o via_fence.o via_buffer.o mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o nv-objs := nv_drv.o +xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o ifeq ($(CONFIG_COMPAT),y) drm-objs += drm_ioc32.o @@ -59,3 +60,4 @@ obj-$(CONFIG_DRM_VIA) += via.o obj-$(CONFIG_DRM_MACH64)+= mach64.o obj-$(CONFIG_DRM_NV) += nv.o obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o +obj-$(CONFIG_DRM_XGI) += xgi.o \ No newline at end of file -- cgit v1.2.3 From 3547fbda63925217a5be24de5d5abec3b53d3fe1 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:29:28 -0700 Subject: Revert over-zealous change from previous commit. 
--- linux-core/xgi_linux.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'linux-core') diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 8cf304c7..465feb3c 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -195,9 +195,12 @@ EXPORT_NO_SYMBOLS; #define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number #define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) +#define XGI_PCI_GET_CLASS_PRESENT #ifdef XGI_PCI_GET_CLASS_PRESENT +#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) #define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) #else +#define XGI_PCI_DEV_PUT(dev) #define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) #endif -- cgit v1.2.3 From b9ef1467fed9e96c5e7bd453d01511f8ce98583c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:39:01 -0700 Subject: Clean up mixed declarations and code. --- linux-core/xgi_cmdlist.c | 3 ++- linux-core/xgi_misc.c | 17 ++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index e00ea228..99be2145 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -196,9 +196,10 @@ void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo) /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ } else { + U32 *lastBatchVirtAddr; + XGI_INFO ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); - U32 *lastBatchVirtAddr; /* Jong 05/25/2006 */ /* return; */ diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 61e40594..06cf0160 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -200,13 +200,15 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) STALL_INTERRUPT_RESET_THRESHOLD) { continoue_int_count = 0; } else if (continoue_int_count >= 3) { + int time_out; + continoue_int_count = 0; // GE Hung up, need reset. 
XGI_INFO("Reset GE!\n"); *(mmio_vbase + 0xb057) = 8; - int time_out = 0xffff; + time_out = 0xffff; while (0 != (ge_3d_status[0x00] & 0xf0000000)) { @@ -214,6 +216,11 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) ((--time_out) & 0xfff)) ; if (0 == time_out) { + U8 old_3ce; + U8 old_3cf; + U8 old_index; + U8 old_36; + XGI_INFO ("Can not reset back 0x%lx!\n", ge_3d_status @@ -222,24 +229,24 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) 0xb057) = 0; // Have to use 3x5.36 to reset. // Save and close dynamic gating - U8 old_3ce = + old_3ce = *(mmio_vbase + 0x3ce); *(mmio_vbase + 0x3ce) = 0x2a; - U8 old_3cf = + old_3cf = *(mmio_vbase + 0x3cf); *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; // Reset GE - U8 old_index = + old_index = *(mmio_vbase + 0x3d4); *(mmio_vbase + 0x3d4) = 0x36; - U8 old_36 = + old_36 = *(mmio_vbase + 0x3d5); *(mmio_vbase + -- cgit v1.2.3 From 8cee7dca95bc2114eb90640cf83ac87c29243683 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 26 Jun 2007 13:46:36 -0700 Subject: Clean up warnings about unused variables and functions. 
--- linux-core/xgi_drv.c | 11 ----------- linux-core/xgi_misc.c | 34 ++-------------------------------- linux-core/xgi_pcie.c | 2 ++ 3 files changed, 4 insertions(+), 43 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 0c37d00e..75204283 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -153,11 +153,6 @@ static inline void xgi_check_pci_config(xgi_info_t * info, int line) pci_write_config_word(info->dev, PCI_COMMAND, cmd); } -static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info) -{ - return 1; -} - /* * struct pci_device_id { * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID @@ -1484,14 +1479,8 @@ static int __init xgi_init_module(void) void __exit xgi_exit_module(void) { int i; - xgi_info_t *info, *max_devices; #ifdef CONFIG_DEVFS_FS - /* - XGI_DEVFS_REMOVE_CONTROL(); - for (i = 0; i < XGI_MAX_DEVICES; i++) - XGI_DEVFS_REMOVE_DEVICE(i); - */ XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); #endif diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 06cf0160..8d0e81b6 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -131,7 +131,7 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) BOOL is_wrong_signal = FALSE; static U32 last_int_tick_low, last_int_tick_high; - static U32 new_int_tick_low, new_int_tick_high; + static U32 new_int_tick_low; static U32 continoue_int_count = 0; // OE II is busy. while (old_ge_status & 0x001c0000) { @@ -290,9 +290,6 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) BOOL xgi_crt_irq_handler(xgi_info_t * info) { BOOL ret = FALSE; - U8 *mmio_vbase = info->mmio.vbase; - U32 device_status = 0; - U32 hw_status = 0; U8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened @@ -303,15 +300,6 @@ BOOL xgi_crt_irq_handler(xgi_info_t * info) // What happened? 
op3cf_37 = bIn3cf(0x37); -#if 0 - if (op3cf_37 & 0x04) - device_status |= GDEVST_CONNECT; - else - device_status &= ~GDEVST_CONNECT; - - device_status |= GDEVST_DEVICE_CHANGED; - hw_status |= HWST_DEVICE_CHANGED; -#endif // Clear CRT interrupt op3cf_3d = bIn3cf(0x3d); bOut3cf(0x3d, (op3cf_3d | 0x04)); @@ -326,9 +314,6 @@ BOOL xgi_crt_irq_handler(xgi_info_t * info) BOOL xgi_dvi_irq_handler(xgi_info_t * info) { BOOL ret = FALSE; - U8 *mmio_vbase = info->mmio.vbase; - U32 device_status = 0; - U32 hw_status = 0; U8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened @@ -340,28 +325,13 @@ BOOL xgi_dvi_irq_handler(xgi_info_t * info) // What happened? op3cf_37 = bIn3cf(0x37); -#if 0 - //Also update our internal flag - if (op3cf_37 & 0x10) // Second Monitor plugged In - { - device_status |= GDEVST_CONNECT; - //Because currenly we cannot determine if DVI digital - //or DVI analog is connected according to DVI interrupt - //We should still call BIOS to check it when utility ask us - device_status &= ~GDEVST_CHECKED; - } else { - device_status &= ~GDEVST_CONNECT; - } -#endif + //Notify BIOS that DVI plug/unplug happened op3x5_5a = bIn3x5(0x5a); bOut3x5(0x5a, op3x5_5a & 0xf7); bWriteReg(0x3d4, save_3x4); - //device_status |= GDEVST_DEVICE_CHANGED; - //hw_status |= HWST_DEVICE_CHANGED; - // Clear DVI interrupt op3cf_39 = bIn3cf(0x39); bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 9457770a..8b024e4a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -350,8 +350,10 @@ void xgi_pcie_heap_check(void) struct list_head *useList, *temp; xgi_pcie_block_t *block; unsigned int ownerIndex; +#ifdef XGI_DEBUG char *ownerStr[6] = { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" }; +#endif if (xgi_pcie_heap) { useList = &xgi_pcie_heap->used_list; -- cgit v1.2.3 From 695599f18d907bb277805581bbe208b0e083e7d9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 
2007 19:03:35 +1000 Subject: nouveau: Nuke DMA_OBJECT_INIT ioctl (bumps interface to 0.0.7) For various reasons, this ioctl was a bad idea. At channel creation we now automatically create DMA objects covering available VRAM and GART memory, where the client used to do this themselves. However, there is still a need to be able to create DMA objects pointing at specific areas of memory (ie. notifiers). Each channel is now allocated a small amount of memory from which a client can suballocate things (such as notifiers), and have a DMA object created which covers the suballocated area. The NOTIFIER_ALLOC ioctl exposes this functionality. --- linux-core/Makefile.kernel | 2 +- linux-core/nouveau_notifier.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 120000 linux-core/nouveau_notifier.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 9427a04b..6ab17a49 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -21,7 +21,7 @@ i810-objs := i810_drv.o i810_dma.o i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ - nouveau_object.o nouveau_irq.o \ + nouveau_object.o nouveau_irq.o nouveau_notifier.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/nouveau_notifier.c b/linux-core/nouveau_notifier.c new file mode 120000 index 00000000..285469c5 --- /dev/null +++ b/linux-core/nouveau_notifier.c @@ -0,0 +1 @@ +../shared-core/nouveau_notifier.c \ No newline at end of file -- cgit v1.2.3 From ce0d528d3ca78348a7c1ad7c402757824fb6cf95 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 24 Jun 2007 20:49:19 +1000 Subject: nouveau/nv50: skeletal backend --- linux-core/Makefile.kernel | 6 +++--- linux-core/nv50_fifo.c | 1 + linux-core/nv50_graph.c | 1 + linux-core/nv50_mc.c | 1 + 4 files changed, 6 insertions(+), 3 deletions(-) create mode 
120000 linux-core/nv50_fifo.c create mode 120000 linux-core/nv50_graph.c create mode 120000 linux-core/nv50_mc.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 6ab17a49..478c4df0 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -23,11 +23,11 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ nv04_timer.o \ - nv04_mc.o nv40_mc.o \ + nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ - nv04_fifo.o nv10_fifo.o nv40_fifo.o \ + nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ - nv40_graph.o + nv40_graph.o nv50_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o sis-objs := sis_drv.o sis_mm.o ffb-objs := ffb_drv.o ffb_context.o diff --git a/linux-core/nv50_fifo.c b/linux-core/nv50_fifo.c new file mode 120000 index 00000000..4c9990a9 --- /dev/null +++ b/linux-core/nv50_fifo.c @@ -0,0 +1 @@ +../shared-core/nv50_fifo.c \ No newline at end of file diff --git a/linux-core/nv50_graph.c b/linux-core/nv50_graph.c new file mode 120000 index 00000000..03f69e68 --- /dev/null +++ b/linux-core/nv50_graph.c @@ -0,0 +1 @@ +../shared-core/nv50_graph.c \ No newline at end of file diff --git a/linux-core/nv50_mc.c b/linux-core/nv50_mc.c new file mode 120000 index 00000000..f4bb369e --- /dev/null +++ b/linux-core/nv50_mc.c @@ -0,0 +1 @@ +../shared-core/nv50_mc.c \ No newline at end of file -- cgit v1.2.3 From 11ffe4632a097e3d579d084634eeccc63348249b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 28 Jun 2007 22:20:13 -0700 Subject: Convert comment header of xgi_find_pcie_virt to kernel doc format. 
--- linux-core/xgi_pcie.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 8b024e4a..b29b083d 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -938,12 +938,13 @@ void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address) return NULL; } -/* - address -- GE HW address - return -- CPU virtual address - - assume the CPU VAddr is continuous in not the same block -*/ +/** + * xgi_find_pcie_virt + * @address: GE HW address + * + * Returns CPU virtual address. Assumes the CPU VAddr is continuous in not + * the same block + */ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) { struct list_head *used_list; -- cgit v1.2.3 From 9c85fb866dc7954092b7ffd0ca9f76eb5354ace8 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 28 Jun 2007 22:26:39 -0700 Subject: Clean up debug log messages in xgi_find_pcie_block. --- linux-core/xgi_pcie.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b29b083d..b449a5fd 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -953,22 +953,18 @@ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) unsigned long loc_in_pagetable; void *ret; - XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n"); - used_list = xgi_pcie_heap->used_list.next; - XGI_INFO("Jong_05292006-used_list=%ul\n", used_list); - offset_in_page = address & (PAGE_SIZE - 1); - XGI_INFO - ("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", - address, PAGE_SIZE - 1, offset_in_page); + + XGI_INFO("begin (used_list = 0x%p, address = 0x%lx, " + "PAGE_SIZE - 1 = %lu, offset_in_page = %lu)\n", + used_list, address, PAGE_SIZE - 1, offset_in_page); while (used_list != &xgi_pcie_heap->used_list) { block = list_entry(used_list, struct xgi_pcie_block_s, list); - 
XGI_INFO("Jong_05292006-block=0x%px\n", block); - XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", - block->hw_addr); - XGI_INFO("Jong_05292006- block->size=%ul\n", block->size); + + XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", + block, block->hw_addr, block->size); if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size))) { @@ -978,21 +974,15 @@ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) (void *)(block->page_table[loc_in_pagetable]. virt_addr + offset_in_page); - XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT); - XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", - loc_in_pagetable); - XGI_INFO - ("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", - block->page_table[loc_in_pagetable].virt_addr); - XGI_INFO("Jong_05292006-offset_in_page=%d\n", - offset_in_page); - XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", - ret); + XGI_INFO("PAGE_SHIFT = %d\n", PAGE_SHIFT); + XGI_INFO("block->page_table[0x%lx].virt_addr = 0x%lx\n", + loc_in_pagetable, + block->page_table[loc_in_pagetable].virt_addr); + XGI_INFO("return 0x%p\n", ret); return ret; } else { - XGI_INFO - ("Jong_05292006-used_list = used_list->next;\n"); + XGI_INFO("used_list = used_list->next;\n"); used_list = used_list->next; } } -- cgit v1.2.3 From 8fa24c53f5851a2d3ad2da31ee56a4fd5abbd543 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 28 Jun 2007 22:32:11 -0700 Subject: Minor clean up of variable declarations in xgi_find_pcie_virt. 
--- linux-core/xgi_pcie.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b449a5fd..d9da30e8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -947,30 +947,25 @@ void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address) */ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) { - struct list_head *used_list; - xgi_pcie_block_t *block; - unsigned long offset_in_page; - unsigned long loc_in_pagetable; - void *ret; - - used_list = xgi_pcie_heap->used_list.next; - offset_in_page = address & (PAGE_SIZE - 1); + struct list_head *used_list = xgi_pcie_heap->used_list.next; + const unsigned long offset_in_page = address & (PAGE_SIZE - 1); XGI_INFO("begin (used_list = 0x%p, address = 0x%lx, " "PAGE_SIZE - 1 = %lu, offset_in_page = %lu)\n", used_list, address, PAGE_SIZE - 1, offset_in_page); while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block_s, list); + xgi_pcie_block_t *block = + list_entry(used_list, struct xgi_pcie_block_s, list); XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", block, block->hw_addr, block->size); if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size))) { - loc_in_pagetable = + const unsigned long loc_in_pagetable = (address - block->hw_addr) >> PAGE_SHIFT; - ret = + void *const ret = (void *)(block->page_table[loc_in_pagetable]. virt_addr + offset_in_page); -- cgit v1.2.3 From 475c1e67bacabb89c568c7482991451d223c53ae Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 28 Jun 2007 23:40:36 -0700 Subject: Remove unused type 'struct xgi_pcie_list_s' / xgi_pcie_list_t. 
--- linux-core/xgi_pcie.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h index 32c2b584..6e8e45b9 100644 --- a/linux-core/xgi_pcie.h +++ b/linux-core/xgi_pcie.h @@ -58,11 +58,6 @@ typedef struct xgi_pcie_block_s { unsigned long processID; } xgi_pcie_block_t; -typedef struct xgi_pcie_list_s { - xgi_pcie_block_t *head; - xgi_pcie_block_t *tail; -} xgi_pcie_list_t; - typedef struct xgi_pcie_heap_s { struct list_head free_list; struct list_head used_list; -- cgit v1.2.3 From 00f1a66f22d52c212bb9334a0103a4785af69bc1 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 29 Jun 2007 12:50:12 +0200 Subject: Fence object reference / dereference cleanup. Buffer object dereference cleanup. Add a struct drm_device member to fence objects: This can simplify code, particularly in drivers. --- linux-core/drm_bo.c | 109 +++++++++++++++++++----------------------- linux-core/drm_bo_move.c | 6 +-- linux-core/drm_fence.c | 122 +++++++++++++++++++++++++++-------------------- linux-core/drm_objects.h | 21 ++++---- linux-core/drm_vm.c | 3 +- 5 files changed, 134 insertions(+), 127 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index f1ca0b44..ab257825 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -269,31 +269,25 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, int no_wait) { - - drm_fence_object_t *fence = bo->fence; int ret; DRM_ASSERT_LOCKED(&bo->mutex); - if (fence) { - drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; + if (bo->fence) { + if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); return 0; } if (no_wait) { return -EBUSY; } ret = - drm_fence_object_wait(dev, fence, lazy, 
ignore_signals, + drm_fence_object_wait(bo->fence, lazy, ignore_signals, bo->fence_type); if (ret) return ret; - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; - + drm_fence_usage_deref_unlocked(&bo->fence); } return 0; } @@ -321,10 +315,8 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) "Evicting buffer.\n"); } } - if (bo->fence) { - drm_fence_usage_deref_unlocked(dev, bo->fence); - bo->fence = NULL; - } + if (bo->fence) + drm_fence_usage_deref_unlocked(&bo->fence); } return 0; } @@ -348,11 +340,9 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - if (bo->fence && drm_fence_object_signaled(dev, bo->fence, - bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, bo->fence); - bo->fence = NULL; - } + if (bo->fence && drm_fence_object_signaled(bo->fence, + bo->fence_type, 0)) + drm_fence_usage_deref_unlocked(&bo->fence); if (bo->fence && remove_all) (void)drm_bo_expire_fence(bo, 0); @@ -383,7 +373,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) } if (list_empty(&bo->ddestroy)) { - drm_fence_object_flush(dev, bo->fence, bo->fence_type); + drm_fence_object_flush(bo->fence, bo->fence_type); list_add_tail(&bo->ddestroy, &bm->ddestroy); schedule_delayed_work(&bm->wq, ((DRM_HZ / 100) < 1) ? 
1 : DRM_HZ / 100); @@ -503,12 +493,15 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -void drm_bo_usage_deref_locked(drm_buffer_object_t * bo) +void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) { - DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); + struct drm_buffer_object *tmp_bo = *bo; + bo = NULL; - if (atomic_dec_and_test(&bo->usage)) { - drm_bo_destroy_locked(bo); + DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex); + + if (atomic_dec_and_test(&tmp_bo->usage)) { + drm_bo_destroy_locked(tmp_bo); } } @@ -520,17 +513,19 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); drm_bo_takedown_vm_locked(bo); - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked(&bo); } -static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo) +static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) { - drm_device_t *dev = bo->dev; + struct drm_buffer_object *tmp_bo = *bo; + drm_device_t *dev = tmp_bo->dev; - if (atomic_dec_and_test(&bo->usage)) { + *bo = NULL; + if (atomic_dec_and_test(&tmp_bo->usage)) { mutex_lock(&dev->struct_mutex); - if (atomic_read(&bo->usage) == 0) - drm_bo_destroy_locked(bo); + if (atomic_read(&tmp_bo->usage) == 0) + drm_bo_destroy_locked(tmp_bo); mutex_unlock(&dev->struct_mutex); } } @@ -616,16 +611,15 @@ int drm_fence_buffer_objects(drm_file_t * priv, if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { count++; if (entry->fence) - drm_fence_usage_deref_locked(dev, entry->fence); - entry->fence = fence; - atomic_inc(&fence->usage); + drm_fence_usage_deref_locked(&entry->fence); + entry->fence = drm_fence_reference_locked(fence); DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&entry->event_queue); drm_bo_add_to_lru(entry); } mutex_unlock(&entry->mutex); - drm_bo_usage_deref_locked(entry); + drm_bo_usage_deref_locked(&entry); l = f_list.next; } DRM_DEBUG("Fenced %d buffers\n", count); @@ 
-742,7 +736,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev, ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); - drm_bo_usage_deref_unlocked(entry); + drm_bo_usage_deref_unlocked(&entry); if (ret) return ret; mutex_lock(&dev->struct_mutex); @@ -962,10 +956,8 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { - drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; + if (drm_fence_object_signaled(fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); return 0; } return 1; @@ -984,16 +976,13 @@ static int drm_bo_busy(drm_buffer_object_t * bo) BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { - drm_device_t *dev = bo->dev; - if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; + if (drm_fence_object_signaled(fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); return 0; } - drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE); - if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) { - drm_fence_usage_deref_unlocked(dev, fence); - bo->fence = NULL; + drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE); + if (drm_fence_object_signaled(fence, bo->fence_type, 0)) { + drm_fence_usage_deref_unlocked(&bo->fence); return 0; } return 1; @@ -1190,7 +1179,7 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, drm_bo_fill_rep_arg(bo, rep); out: mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return ret; } @@ -1216,7 +1205,7 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) } drm_remove_ref_object(priv, ro); - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked(&bo); out: mutex_unlock(&dev->struct_mutex); return ret; @@ -1512,7 +1501,7 @@ 
static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return ret; } @@ -1534,7 +1523,7 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, (void)drm_bo_busy(bo); drm_bo_fill_rep_arg(bo, rep); mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return 0; } @@ -1566,7 +1555,7 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, out: mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return ret; } @@ -1651,7 +1640,7 @@ int drm_buffer_object_create(drm_device_t *dev, out_err: mutex_unlock(&bo->mutex); - drm_bo_usage_deref_unlocked(bo); + drm_bo_usage_deref_unlocked(&bo); return ret; } @@ -1728,7 +1717,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) mask & DRM_BO_FLAG_SHAREABLE); if (rep.ret) - drm_bo_usage_deref_unlocked(entry); + drm_bo_usage_deref_unlocked(&entry); if (rep.ret) break; @@ -1957,7 +1946,7 @@ restart: allow_errors); mutex_lock(&dev->struct_mutex); - drm_bo_usage_deref_locked(entry); + drm_bo_usage_deref_locked(&entry); if (ret) return ret; @@ -1967,10 +1956,8 @@ restart: do_restart = ((next->prev != list) && (next->prev != prev)); - if (nentry != NULL && do_restart) { - drm_bo_usage_deref_locked(nentry); - nentry = NULL; - } + if (nentry != NULL && do_restart) + drm_bo_usage_deref_locked(&nentry); if (do_restart) goto restart; @@ -2365,7 +2352,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ); list->map = NULL; list->user_token = 0ULL; - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked(&bo); } static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 4f752065..8ef2a8ff 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -306,7 +306,7 @@ int 
drm_buffer_object_transfer(drm_buffer_object_t * bo, INIT_LIST_HEAD(&fbo->p_mm_list); #endif - atomic_inc(&bo->fence->usage); + drm_fence_reference_unlocked(&fbo->fence, bo->fence); fbo->pinned_node = NULL; fbo->mem.mm_node->private = (void *)fbo; atomic_set(&fbo->usage, 1); @@ -339,7 +339,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, drm_buffer_object_t *old_obj; if (bo->fence) - drm_fence_usage_deref_unlocked(dev, bo->fence); + drm_fence_usage_deref_unlocked(&bo->fence); ret = drm_fence_object_create(dev, fence_class, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &bo->fence); @@ -396,7 +396,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); drm_bo_add_to_lru(old_obj); - drm_bo_usage_deref_locked(old_obj); + drm_bo_usage_deref_locked(&old_obj); mutex_unlock(&dev->struct_mutex); } diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index b5fc2235..ace70d51 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -124,56 +124,76 @@ static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) write_unlock_irqrestore(&fm->lock, flags); } -void drm_fence_usage_deref_locked(drm_device_t * dev, - drm_fence_object_t * fence) +void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) { + struct drm_fence_object *tmp_fence = *fence; + struct drm_device *dev = tmp_fence->dev; drm_fence_manager_t *fm = &dev->fm; DRM_ASSERT_LOCKED(&dev->struct_mutex); - - if (atomic_dec_and_test(&fence->usage)) { - drm_fence_unring(dev, &fence->ring); + *fence = NULL; + if (atomic_dec_and_test(&tmp_fence->usage)) { + drm_fence_unring(dev, &tmp_fence->ring); DRM_DEBUG("Destroyed a fence object 0x%08lx\n", - fence->base.hash.key); + tmp_fence->base.hash.key); atomic_dec(&fm->count); - BUG_ON(!list_empty(&fence->base.list)); - drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); + BUG_ON(!list_empty(&tmp_fence->base.list)); + drm_ctl_free(tmp_fence, sizeof(*tmp_fence), 
DRM_MEM_FENCE); } } -void drm_fence_usage_deref_unlocked(drm_device_t * dev, - drm_fence_object_t * fence) +void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence) { + struct drm_fence_object *tmp_fence = *fence; + struct drm_device *dev = tmp_fence->dev; drm_fence_manager_t *fm = &dev->fm; - if (atomic_dec_and_test(&fence->usage)) { + *fence = NULL; + if (atomic_dec_and_test(&tmp_fence->usage)) { mutex_lock(&dev->struct_mutex); - if (atomic_read(&fence->usage) == 0) { - drm_fence_unring(dev, &fence->ring); + if (atomic_read(&tmp_fence->usage) == 0) { + drm_fence_unring(dev, &tmp_fence->ring); atomic_dec(&fm->count); - BUG_ON(!list_empty(&fence->base.list)); - drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); + BUG_ON(!list_empty(&tmp_fence->base.list)); + drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); } mutex_unlock(&dev->struct_mutex); } } -static void drm_fence_object_destroy(drm_file_t * priv, - drm_user_object_t * base) +struct drm_fence_object +*drm_fence_reference_locked(struct drm_fence_object *src) +{ + DRM_ASSERT_LOCKED(&src->dev->struct_mutex); + + atomic_inc(&src->usage); + return src; +} + +void drm_fence_reference_unlocked(struct drm_fence_object **dst, + struct drm_fence_object *src) +{ + mutex_lock(&src->dev->struct_mutex); + *dst = src; + atomic_inc(&src->usage); + mutex_unlock(&src->dev->struct_mutex); +} + + +static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base) { - drm_device_t *dev = priv->head->dev; drm_fence_object_t *fence = drm_user_object_entry(base, drm_fence_object_t, base); - drm_fence_usage_deref_locked(dev, fence); + drm_fence_usage_deref_locked(&fence); } -int drm_fence_object_signaled(drm_device_t * dev, - drm_fence_object_t * fence, - uint32_t mask, int poke_flush) +int drm_fence_object_signaled(drm_fence_object_t * fence, + uint32_t mask, int poke_flush) { unsigned long flags; int signaled; + struct drm_device *dev = fence->dev; drm_fence_manager_t *fm = &dev->fm; 
drm_fence_driver_t *driver = dev->driver->fence_driver; @@ -204,10 +224,10 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, } } -int drm_fence_object_flush(drm_device_t * dev, - drm_fence_object_t * fence, +int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type) { + struct drm_device *dev = fence->dev; drm_fence_manager_t *fm = &dev->fm; drm_fence_class_manager_t *fc = &fm->class[fence->class]; drm_fence_driver_t *driver = dev->driver->fence_driver; @@ -270,24 +290,23 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) mutex_unlock(&dev->struct_mutex); return; } - fence = list_entry(fc->ring.next, drm_fence_object_t, ring); - atomic_inc(&fence->usage); + fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring)); mutex_unlock(&dev->struct_mutex); diff = (old_sequence - fence->sequence) & driver->sequence_mask; read_unlock_irqrestore(&fm->lock, flags); if (diff < driver->wrap_diff) { - drm_fence_object_flush(dev, fence, fence->type); + drm_fence_object_flush(fence, fence->type); } - drm_fence_usage_deref_unlocked(dev, fence); + drm_fence_usage_deref_unlocked(&fence); } EXPORT_SYMBOL(drm_fence_flush_old); -static int drm_fence_lazy_wait(drm_device_t *dev, - drm_fence_object_t *fence, +static int drm_fence_lazy_wait(drm_fence_object_t *fence, int ignore_signals, uint32_t mask) { + struct drm_device *dev = fence->dev; drm_fence_manager_t *fm = &dev->fm; drm_fence_class_manager_t *fc = &fm->class[fence->class]; int signaled; @@ -296,13 +315,13 @@ static int drm_fence_lazy_wait(drm_device_t *dev, do { DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ, - (signaled = drm_fence_object_signaled(dev, fence, mask, 1))); + (signaled = drm_fence_object_signaled(fence, mask, 1))); if (signaled) return 0; if (time_after_eq(jiffies, _end)) break; } while (ret == -EINTR && ignore_signals); - if (drm_fence_object_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(fence, mask, 0)) return 
0; if (time_after_eq(jiffies, _end)) ret = -EBUSY; @@ -317,10 +336,10 @@ static int drm_fence_lazy_wait(drm_device_t *dev, return 0; } -int drm_fence_object_wait(drm_device_t * dev, - drm_fence_object_t * fence, +int drm_fence_object_wait(drm_fence_object_t * fence, int lazy, int ignore_signals, uint32_t mask) { + struct drm_device *dev = fence->dev; drm_fence_driver_t *driver = dev->driver->fence_driver; int ret = 0; unsigned long _end; @@ -332,16 +351,16 @@ int drm_fence_object_wait(drm_device_t * dev, return -EINVAL; } - if (drm_fence_object_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(fence, mask, 0)) return 0; _end = jiffies + 3 * DRM_HZ; - drm_fence_object_flush(dev, fence, mask); + drm_fence_object_flush(fence, mask); if (lazy && driver->lazy_capable) { - ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask); + ret = drm_fence_lazy_wait(fence, ignore_signals, mask); if (ret) return ret; @@ -349,7 +368,7 @@ int drm_fence_object_wait(drm_device_t * dev, if (driver->has_irq(dev, fence->class, DRM_FENCE_TYPE_EXE)) { - ret = drm_fence_lazy_wait(dev, fence, ignore_signals, + ret = drm_fence_lazy_wait(fence, ignore_signals, DRM_FENCE_TYPE_EXE); if (ret) return ret; @@ -357,13 +376,13 @@ int drm_fence_object_wait(drm_device_t * dev, if (driver->has_irq(dev, fence->class, mask & ~DRM_FENCE_TYPE_EXE)) { - ret = drm_fence_lazy_wait(dev, fence, ignore_signals, + ret = drm_fence_lazy_wait(fence, ignore_signals, mask); if (ret) return ret; } } - if (drm_fence_object_signaled(dev, fence, mask, 0)) + if (drm_fence_object_signaled(fence, mask, 0)) return 0; /* @@ -375,7 +394,7 @@ int drm_fence_object_wait(drm_device_t * dev, #endif do { schedule(); - signaled = drm_fence_object_signaled(dev, fence, mask, 1); + signaled = drm_fence_object_signaled(fence, mask, 1); } while (!signaled && !time_after_eq(jiffies, _end)); if (!signaled) @@ -384,9 +403,10 @@ int drm_fence_object_wait(drm_device_t * dev, return 0; } -int drm_fence_object_emit(drm_device_t * 
dev, drm_fence_object_t * fence, +int drm_fence_object_emit(drm_fence_object_t * fence, uint32_t fence_flags, uint32_t class, uint32_t type) { + struct drm_device *dev = fence->dev; drm_fence_manager_t *fm = &dev->fm; drm_fence_driver_t *driver = dev->driver->fence_driver; drm_fence_class_manager_t *fc = &fm->class[fence->class]; @@ -436,9 +456,10 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class, fence->submitted_flush = 0; fence->signaled = 0; fence->sequence = 0; + fence->dev = dev; write_unlock_irqrestore(&fm->lock, flags); if (fence_flags & DRM_FENCE_FLAG_EMIT) { - ret = drm_fence_object_emit(dev, fence, fence_flags, + ret = drm_fence_object_emit(fence, fence_flags, fence->class, type); } return ret; @@ -476,7 +497,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, return -ENOMEM; ret = drm_fence_object_init(dev, class, type, flags, fence); if (ret) { - drm_fence_usage_deref_unlocked(dev, fence); + drm_fence_usage_deref_unlocked(&fence); return ret; } *c_fence = fence; @@ -533,8 +554,7 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) mutex_unlock(&dev->struct_mutex); return NULL; } - fence = drm_user_object_entry(uo, drm_fence_object_t, base); - atomic_inc(&fence->usage); + fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base)); mutex_unlock(&dev->struct_mutex); return fence; } @@ -568,7 +588,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) arg.flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) { - drm_fence_usage_deref_unlocked(dev, fence); + drm_fence_usage_deref_unlocked(&fence); return ret; } arg.handle = fence->base.hash.key; @@ -603,14 +623,14 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) return -EINVAL; - ret = drm_fence_object_flush(dev, fence, arg.type); + ret = drm_fence_object_flush(fence, arg.type); break; case drm_fence_wait: fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) 
return -EINVAL; ret = - drm_fence_object_wait(dev, fence, + drm_fence_object_wait(fence, arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, 0, arg.type); break; @@ -619,7 +639,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) fence = drm_lookup_fence_object(priv, arg.handle); if (!fence) return -EINVAL; - ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class, + ret = drm_fence_object_emit(fence, arg.flags, arg.class, arg.type); break; case drm_fence_buffers: @@ -647,7 +667,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) arg.type = fence->type; arg.signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); - drm_fence_usage_deref_unlocked(dev, fence); + drm_fence_usage_deref_unlocked(&fence); DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 59c8902d..f82d6628 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -141,6 +141,7 @@ extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, typedef struct drm_fence_object { drm_user_object_t base; + struct drm_device *dev; atomic_t usage; /* @@ -196,17 +197,15 @@ extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, uint32_t sequence); -extern int drm_fence_object_flush(struct drm_device *dev, - drm_fence_object_t * fence, uint32_t type); -extern int drm_fence_object_signaled(struct drm_device *dev, - drm_fence_object_t * fence, +extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type); +extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type, int flush); -extern void drm_fence_usage_deref_locked(struct drm_device *dev, - drm_fence_object_t * fence); -extern void drm_fence_usage_deref_unlocked(struct drm_device *dev, - drm_fence_object_t * fence); -extern int drm_fence_object_wait(struct drm_device *dev, - 
drm_fence_object_t * fence, +extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence); +extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence); +extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); +extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, + struct drm_fence_object *src); +extern int drm_fence_object_wait(drm_fence_object_t * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, @@ -441,7 +440,7 @@ extern int drm_bo_pci_offset(struct drm_device *dev, unsigned long *bus_size); extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem); -extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo); +extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo); extern int drm_fence_buffer_objects(drm_file_t * priv, struct list_head *list, uint32_t fence_flags, diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index f2c43508..72d63c10 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -840,7 +840,8 @@ static void drm_bo_vm_close(struct vm_area_struct *vma) #ifdef DRM_ODD_MM_COMPAT drm_bo_delete_vma(bo, vma); #endif - drm_bo_usage_deref_locked(bo); + drm_bo_usage_deref_locked((struct drm_buffer_object **) + &vma->vm_private_data); mutex_unlock(&dev->struct_mutex); } return; -- cgit v1.2.3 From a27af4c4a665864df09123f177ca7269e48f6171 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 29 Jun 2007 15:22:28 +0200 Subject: Avoid hitting BUG() for kernel-only fence objects. 
--- linux-core/drm_fence.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'linux-core') diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index ace70d51..5215feb6 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -450,6 +450,12 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class, write_lock_irqsave(&fm->lock, flags); INIT_LIST_HEAD(&fence->ring); + + /* + * Avoid hitting BUG() for kernel-only fence objects. + */ + + INIT_LIST_HEAD(&fence->base.list); fence->class = class; fence->type = type; fence->flush_mask = 0; -- cgit v1.2.3 From 33b8476dfb0f9b5045103c3a9781ba82bcae4a9d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 09:30:02 -0700 Subject: Fix return type of xgi_find_pcie_block. This function used to return 'void *', which was then cast to 'xgi_pcie_block_t *' at the only caller. I changed the return type to 'struct xgi_pcie_block_s *' and removed the explicit cast. --- linux-core/xgi_drv.c | 5 +---- linux-core/xgi_drv.h | 3 ++- linux-core/xgi_pcie.c | 3 ++- 3 files changed, 5 insertions(+), 6 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 75204283..a01b3c22 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -730,10 +730,7 @@ int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { xgi_down(info->pcie_sem); - block = - (xgi_pcie_block_t *) xgi_find_pcie_block(info, - XGI_VMA_OFFSET - (vma)); + block = xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma)); if (block == NULL) { XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 429719a7..5d76b632 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -353,7 +353,8 @@ extern void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, enum PcieOwner owner, xgi_mem_alloc_t * alloc); extern void xgi_pcie_free(xgi_info_t * info, unsigned 
long offset); extern void xgi_pcie_heap_check(void); -extern void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address); +extern struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, + unsigned long address); extern void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address); extern void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index d9da30e8..1a4d8e12 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -906,7 +906,8 @@ void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) * given a bus address, fid the pcie mem block * uses the bus address as the key. */ -void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address) +struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, + unsigned long address) { struct list_head *used_list; xgi_pcie_block_t *block; -- cgit v1.2.3 From 88328d4ef007c781874aafedfef59aae0d21a37c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 15:27:38 -0700 Subject: Eliminate structure typedefs Documentation/CodingStyle says that 'typedef struct foo foo_t' is evil. I tend to agree. Eliminate all uses of such constructs.
--- linux-core/xgi_cmdlist.c | 26 +++++----- linux-core/xgi_cmdlist.h | 12 ++--- linux-core/xgi_drv.c | 108 +++++++++++++++++++-------------------- linux-core/xgi_drv.h | 130 +++++++++++++++++++++++------------------------ linux-core/xgi_fb.c | 110 +++++++++++++++++++-------------------- linux-core/xgi_fb.h | 31 ++--------- linux-core/xgi_linux.h | 20 +++----- linux-core/xgi_misc.c | 32 ++++++------ linux-core/xgi_misc.h | 26 +++++----- linux-core/xgi_pcie.c | 116 +++++++++++++++++++++--------------------- linux-core/xgi_pcie.h | 18 +++---- linux-core/xgi_regs.h | 98 +++++++++++++++++------------------ 12 files changed, 350 insertions(+), 377 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 99be2145..2cdf714f 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -47,17 +47,17 @@ U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = { FLUSH_2D }; -xgi_cmdring_info_t s_cmdring; +struct xgi_cmdring_info s_cmdring; -static void addFlush2D(xgi_info_t * info); -static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo); -static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter); +static void addFlush2D(struct xgi_info * info); +static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); +static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter); static void xgi_cmdlist_reset(void); -int xgi_cmdlist_initialize(xgi_info_t * info, U32 size) +int xgi_cmdlist_initialize(struct xgi_info * info, U32 size) { - //xgi_mem_req_t mem_req; - xgi_mem_alloc_t mem_alloc; + //struct xgi_mem_req mem_req; + struct xgi_mem_alloc mem_alloc; //mem_req.size = size; @@ -76,7 +76,7 @@ int xgi_cmdlist_initialize(xgi_info_t * info, U32 size) return 1; } -void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo) +void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) { U32 beginPort; /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ @@ -238,7 +238,7 @@ 
void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo) 2 - fb 3 - logout */ -void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo) +void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo) { #define STATE_CONSOLE 0 #define STATE_GRAPHIC 1 @@ -273,7 +273,7 @@ void xgi_cmdlist_reset(void) s_cmdring._cmdRingOffset = 0; } -void xgi_cmdlist_cleanup(xgi_info_t * info) +void xgi_cmdlist_cleanup(struct xgi_info * info) { if (s_cmdring._cmdRingBuffer != 0) { xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); @@ -283,7 +283,7 @@ void xgi_cmdlist_cleanup(xgi_info_t * info) } } -static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter) +static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter) { static U32 s_triggerID = 1; @@ -295,7 +295,7 @@ static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter) } } -static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo) +static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) { // Convert the batch type to begin port ID switch (pCmdInfo->_firstBeginType) { @@ -313,7 +313,7 @@ static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo) } } -static void addFlush2D(xgi_info_t * info) +static void addFlush2D(struct xgi_info * info) { U32 *flushBatchVirtAddr; U32 flushBatchHWAddr; diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 5fe1de71..b11511ff 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -57,20 +57,20 @@ typedef enum { AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE } CMD_SIZE; -typedef struct xgi_cmdring_info_s { +struct xgi_cmdring_info { U32 _cmdRingSize; U32 _cmdRingBuffer; U32 _cmdRingBusAddr; U32 _lastBatchStartAddr; U32 _cmdRingOffset; -} xgi_cmdring_info_t; +}; -extern int xgi_cmdlist_initialize(xgi_info_t * info, U32 size); +extern int xgi_cmdlist_initialize(struct xgi_info * info, U32 size); -extern void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * 
pCmdInfo); +extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo); -extern void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo); +extern void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo); -extern void xgi_cmdlist_cleanup(xgi_info_t * info); +extern void xgi_cmdlist_cleanup(struct xgi_info * info); #endif /* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index a01b3c22..44b003a8 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -53,14 +53,14 @@ int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */ static int xgi_num_devices = 0; -xgi_info_t xgi_devices[XGI_MAX_DEVICES]; +struct xgi_info xgi_devices[XGI_MAX_DEVICES]; #if defined(XGI_PM_SUPPORT_APM) static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; #endif /* add one for the control device */ -xgi_info_t xgi_ctl_device; +struct xgi_info xgi_ctl_device; wait_queue_head_t xgi_ctl_waitqueue; #ifdef CONFIG_PROC_FS @@ -74,7 +74,7 @@ devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; struct list_head xgi_mempid_list; /* xgi_ functions.. 
do not take a state device parameter */ -static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info); +static int xgi_post_vbios(struct xgi_ioctl_post_vbios * info); static void xgi_proc_create(void); static void xgi_proc_remove_all(struct proc_dir_entry *); static void xgi_proc_remove(void); @@ -110,7 +110,7 @@ unsigned int xgi_kern_ctl_poll(struct file *, poll_table *); void xgi_kern_isr_bh(unsigned long); irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); -static void xgi_lock_init(xgi_info_t * info); +static void xgi_lock_init(struct xgi_info * info); #if defined(XGI_PM_SUPPORT_ACPI) int xgi_kern_acpi_standby(struct pci_dev *, u32); @@ -128,7 +128,7 @@ int xgi_kern_acpi_resume(struct pci_dev *); #define XGI_CHECK_PCI_CONFIG(xgi) \ xgi_check_pci_config(xgi, __LINE__) -static inline void xgi_check_pci_config(xgi_info_t * info, int line) +static inline void xgi_check_pci_config(struct xgi_info * info, int line) { unsigned short cmd, flag = 0; @@ -208,7 +208,7 @@ static struct pci_driver xgi_pci_driver = { */ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) { - xgi_info_t *info; + struct xgi_info *info; if ((dev->vendor != PCI_VENDOR_ID_XGI) || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) { @@ -361,8 +361,8 @@ void xgi_kern_vma_open(struct vm_area_struct *vma) vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); if (XGI_VMA_PRIVATE(vma)) { - xgi_pcie_block_t *block = - (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct xgi_pcie_block *block = + (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); XGI_ATOMIC_INC(block->use_count); } } @@ -373,8 +373,8 @@ void xgi_kern_vma_release(struct vm_area_struct *vma) vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); if (XGI_VMA_PRIVATE(vma)) { - xgi_pcie_block_t *block = - (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct xgi_pcie_block *block = + (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); XGI_ATOMIC_DEC(block->use_count); /* @@ -393,7 +393,7 @@ void xgi_kern_vma_release(struct vm_area_struct 
*vma) struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, unsigned long address, int *type) { - xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); struct page *page = NOPAGE_SIGBUS; unsigned long offset = 0; unsigned long page_addr = 0; @@ -436,7 +436,7 @@ struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, unsigned long address, int write_access) { - xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); + struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); struct page *page = NOPAGE_SIGBUS; unsigned long offset = 0; unsigned long page_addr = 0; @@ -496,15 +496,15 @@ static struct file_operations xgi_fops = { .release = xgi_kern_release, }; -static xgi_file_private_t *xgi_alloc_file_private(void) +static struct xgi_file_private *xgi_alloc_file_private(void) { - xgi_file_private_t *fp; + struct xgi_file_private *fp; - XGI_KMALLOC(fp, sizeof(xgi_file_private_t)); + XGI_KMALLOC(fp, sizeof(struct xgi_file_private)); if (!fp) return NULL; - memset(fp, 0, sizeof(xgi_file_private_t)); + memset(fp, 0, sizeof(struct xgi_file_private)); /* initialize this file's event queue */ init_waitqueue_head(&fp->wait_queue); @@ -514,17 +514,17 @@ static xgi_file_private_t *xgi_alloc_file_private(void) return fp; } -static void xgi_free_file_private(xgi_file_private_t * fp) +static void xgi_free_file_private(struct xgi_file_private * fp) { if (fp == NULL) return; - XGI_KFREE(fp, sizeof(xgi_file_private_t)); + XGI_KFREE(fp, sizeof(struct xgi_file_private)); } int xgi_kern_open(struct inode *inode, struct file *filp) { - xgi_info_t *info = NULL; + struct xgi_info *info = NULL; int dev_num; int result = 0, status; @@ -621,7 +621,7 @@ int xgi_kern_open(struct inode *inode, struct file *filp) int xgi_kern_release(struct inode *inode, struct file *filp) { - xgi_info_t *info = 
XGI_INFO_FROM_FP(filp); + struct xgi_info *info = XGI_INFO_FROM_FP(filp); XGI_CHECK_PCI_CONFIG(info); @@ -674,8 +674,8 @@ int xgi_kern_release(struct inode *inode, struct file *filp) int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) { //struct inode *inode = INODE_FROM_FP(filp); - xgi_info_t *info = XGI_INFO_FROM_FP(filp); - xgi_pcie_block_t *block; + struct xgi_info *info = XGI_INFO_FROM_FP(filp); + struct xgi_pcie_block *block; int pages = 0; unsigned long prot; @@ -792,8 +792,8 @@ int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) { - xgi_file_private_t *fp; - xgi_info_t *info; + struct xgi_file_private *fp; + struct xgi_info *info; unsigned int mask = 0; unsigned long eflags; @@ -828,8 +828,8 @@ unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) int xgi_kern_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - xgi_info_t *info; - xgi_mem_alloc_t *alloc = NULL; + struct xgi_info *info; + struct xgi_mem_alloc *alloc = NULL; int status = 0; void *arg_copy; @@ -880,21 +880,21 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, } else XGI_INFO("Jong-copy_from_user-OK! 
\n"); - alloc = (xgi_mem_alloc_t *) arg_copy; + alloc = (struct xgi_mem_alloc *) arg_copy; XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, arg_size); switch (_IOC_NR(cmd)) { case XGI_ESC_DEVICE_INFO: XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); - xgi_get_device_info(info, (struct xgi_chip_info_s *)arg_copy); + xgi_get_device_info(info, (struct xgi_chip_info *)arg_copy); break; case XGI_ESC_POST_VBIOS: XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); break; case XGI_ESC_FB_ALLOC: XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); - xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc); + xgi_fb_alloc(info, (struct xgi_mem_req *)arg_copy, alloc); break; case XGI_ESC_FB_FREE: XGI_INFO("Jong-xgi_ioctl_fb_free \n"); @@ -906,8 +906,8 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_PCIE_ALLOC: XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); - xgi_pcie_alloc(info, ((xgi_mem_req_t *) arg_copy)->size, - ((xgi_mem_req_t *) arg_copy)->owner, alloc); + xgi_pcie_alloc(info, ((struct xgi_mem_req *) arg_copy)->size, + ((struct xgi_mem_req *) arg_copy)->owner, alloc); break; case XGI_ESC_PCIE_FREE: XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", @@ -920,15 +920,15 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_GET_SCREEN_INFO: XGI_INFO("Jong-xgi_get_screen_info \n"); - xgi_get_screen_info(info, (struct xgi_screen_info_s *)arg_copy); + xgi_get_screen_info(info, (struct xgi_screen_info *)arg_copy); break; case XGI_ESC_PUT_SCREEN_INFO: XGI_INFO("Jong-xgi_put_screen_info \n"); - xgi_put_screen_info(info, (struct xgi_screen_info_s *)arg_copy); + xgi_put_screen_info(info, (struct xgi_screen_info *)arg_copy); break; case XGI_ESC_MMIO_INFO: XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); - xgi_get_mmio_info(info, (struct xgi_mmio_info_s *)arg_copy); + xgi_get_mmio_info(info, (struct xgi_mmio_info *)arg_copy); break; case XGI_ESC_GE_RESET: XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); @@ -936,7 +936,7 @@ int 
xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_SAREA_INFO: XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); - xgi_sarea_info(info, (struct xgi_sarea_info_s *)arg_copy); + xgi_sarea_info(info, (struct xgi_sarea_info *)arg_copy); break; case XGI_ESC_DUMP_REGISTER: XGI_INFO("Jong-xgi_ioctl_dump_register \n"); @@ -945,12 +945,12 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, case XGI_ESC_DEBUG_INFO: XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); xgi_restore_registers(info); - //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); - //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); + //xgi_write_pcie_mem(info, (struct xgi_mem_req *) arg_copy); + //xgi_read_pcie_mem(info, (struct xgi_mem_req *) arg_copy); break; case XGI_ESC_SUBMIT_CMDLIST: XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); - xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy); + xgi_submit_cmdlist(info, (struct xgi_cmd_info *) arg_copy); break; case XGI_ESC_TEST_RWINKERNEL: XGI_INFO("Jong-xgi_test_rwinkernel \n"); @@ -958,11 +958,11 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_STATE_CHANGE: XGI_INFO("Jong-xgi_state_change \n"); - xgi_state_change(info, (xgi_state_info_t *) arg_copy); + xgi_state_change(info, (struct xgi_state_info *) arg_copy); break; case XGI_ESC_CPUID: XGI_INFO("Jong-XGI_ESC_CPUID \n"); - xgi_get_cpu_id((struct cpu_info_s *)arg_copy); + xgi_get_cpu_id((struct cpu_info *)arg_copy); break; default: XGI_INFO("Jong-xgi_ioctl_default \n"); @@ -985,7 +985,7 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, */ int xgi_kern_ctl_open(struct inode *inode, struct file *filp) { - xgi_info_t *info = &xgi_ctl_device; + struct xgi_info *info = &xgi_ctl_device; int rc = 0; @@ -1011,7 +1011,7 @@ int xgi_kern_ctl_open(struct inode *inode, struct file *filp) int xgi_kern_ctl_close(struct inode *inode, struct file *filp) { - xgi_info_t *info = XGI_INFO_FROM_FP(filp); + struct xgi_info *info = 
XGI_INFO_FROM_FP(filp); XGI_INFO("Jong-xgi_kern_ctl_close\n"); @@ -1031,7 +1031,7 @@ int xgi_kern_ctl_close(struct inode *inode, struct file *filp) unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait) { - //xgi_info_t *info = XGI_INFO_FROM_FP(filp);; + //struct xgi_info *info = XGI_INFO_FROM_FP(filp);; unsigned int ret = 0; if (!(filp->f_flags & O_NONBLOCK)) { @@ -1073,7 +1073,7 @@ static u8 xgi_find_pcie_capability(struct pci_dev *dev) return 0; } -static struct pci_dev *xgi_get_pci_device(xgi_info_t * info) +static struct pci_dev *xgi_get_pci_device(struct xgi_info * info) { struct pci_dev *dev; @@ -1095,8 +1095,8 @@ int xgi_kern_read_card_info(char *page, char **start, off_t off, char *type; int len = 0; - xgi_info_t *info; - info = (xgi_info_t *) data; + struct xgi_info *info; + info = (struct xgi_info *) data; dev = xgi_get_pci_device(info); if (!dev) @@ -1143,8 +1143,8 @@ static void xgi_proc_create(void) struct proc_dir_entry *entry; struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; - xgi_info_t *info; - xgi_info_t *xgi_max_devices; + struct xgi_info *info; + struct xgi_info *xgi_max_devices; /* world readable directory */ int flags = S_IFDIR | S_IRUGO | S_IXUGO; @@ -1268,7 +1268,7 @@ static void xgi_proc_remove(void) */ irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) { - xgi_info_t *info = (xgi_info_t *) dev_id; + struct xgi_info *info = (struct xgi_info *) dev_id; u32 need_to_run_bottom_half = 0; //XGI_INFO("xgi_kern_isr \n"); @@ -1286,7 +1286,7 @@ irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) void xgi_kern_isr_bh(unsigned long data) { - xgi_info_t *info = (xgi_info_t *) data; + struct xgi_info *info = (struct xgi_info *) data; XGI_INFO("xgi_kern_isr_bh \n"); @@ -1295,7 +1295,7 @@ void xgi_kern_isr_bh(unsigned long data) XGI_CHECK_PCI_CONFIG(info); } -static void xgi_lock_init(xgi_info_t * info) +static void xgi_lock_init(struct xgi_info * info) { if (info == NULL) return; @@ -1309,7 
+1309,7 @@ static void xgi_lock_init(xgi_info_t * info) XGI_ATOMIC_SET(info->use_count, 0); } -static void xgi_dev_init(xgi_info_t * info) +static void xgi_dev_init(struct xgi_info * info) { struct pci_dev *pdev = NULL; struct xgi_dev *dev; @@ -1354,7 +1354,7 @@ static void xgi_dev_init(xgi_info_t * info) static int __init xgi_init_module(void) { - xgi_info_t *info = &xgi_devices[xgi_num_devices]; + struct xgi_info *info = &xgi_devices[xgi_num_devices]; int i, result; XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION); @@ -1421,7 +1421,7 @@ static int __init xgi_init_module(void) /* init the xgi control device */ { - xgi_info_t *info_ctl = &xgi_ctl_device; + struct xgi_info *info_ctl = &xgi_ctl_device; xgi_lock_init(info_ctl); } diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 5d76b632..32ee5e81 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -93,26 +93,26 @@ /* need a fake device number for control device; just to flag it for msgs */ #define XGI_CONTROL_DEVICE_NUMBER 100 -typedef struct { +struct xgi_aperture { U32 base; // pcie base is different from fb base U32 size; U8 *vbase; -} xgi_aperture_t; +}; -typedef struct xgi_screen_info_s { +struct xgi_screen_info { U32 scrn_start; U32 scrn_xres; U32 scrn_yres; U32 scrn_bpp; U32 scrn_pitch; -} xgi_screen_info_t; +}; -typedef struct xgi_sarea_info_s { +struct xgi_sarea_info { U32 bus_addr; U32 size; -} xgi_sarea_info_t; +}; -typedef struct xgi_info_s { +struct xgi_info { struct pci_dev *dev; int flags; int device_number; @@ -123,11 +123,11 @@ typedef struct xgi_info_s { U8 revision_id; /* physical characteristics */ - xgi_aperture_t mmio; - xgi_aperture_t fb; - xgi_aperture_t pcie; - xgi_screen_info_t scrn_info; - xgi_sarea_info_t sarea_info; + struct xgi_aperture mmio; + struct xgi_aperture fb; + struct xgi_aperture pcie; + struct xgi_screen_info scrn_info; + struct xgi_sarea_info sarea_info; /* look up table parameters */ U32 *lut_base; @@ -150,18 +150,18 @@ typedef 
struct xgi_info_s { struct semaphore info_sem; struct semaphore fb_sem; struct semaphore pcie_sem; -} xgi_info_t; +}; -typedef struct xgi_ioctl_post_vbios { +struct xgi_ioctl_post_vbios { U32 bus; U32 slot; -} xgi_ioctl_post_vbios_t; +}; -typedef enum xgi_mem_location_s { +enum xgi_mem_location { NON_LOCAL = 0, LOCAL = 1, INVALID = 0x7fffffff -} xgi_mem_location_t; +}; enum PcieOwner { PCIE_2D = 0, @@ -176,23 +176,23 @@ enum PcieOwner { PCIE_INVALID = 0x7fffffff }; -typedef struct xgi_mem_req_s { - xgi_mem_location_t location; +struct xgi_mem_req { + enum xgi_mem_location location; unsigned long size; unsigned long is_front; enum PcieOwner owner; unsigned long pid; -} xgi_mem_req_t; +}; -typedef struct xgi_mem_alloc_s { - xgi_mem_location_t location; +struct xgi_mem_alloc { + enum xgi_mem_location location; unsigned long size; unsigned long bus_addr; unsigned long hw_addr; unsigned long pid; -} xgi_mem_alloc_t; +}; -typedef struct xgi_chip_info_s { +struct xgi_chip_info { U32 device_id; char device_name[32]; U32 vendor_id; @@ -200,17 +200,17 @@ typedef struct xgi_chip_info_s { U32 fb_size; U32 sarea_bus_addr; U32 sarea_size; -} xgi_chip_info_t; +}; -typedef struct xgi_opengl_cmd_s { +struct xgi_opengl_cmd { U32 cmd; -} xgi_opengl_cmd_t; +}; -typedef struct xgi_mmio_info_s { - xgi_opengl_cmd_t cmd_head; +struct xgi_mmio_info { + struct xgi_opengl_cmd cmd_head; void *mmioBase; int size; -} xgi_mmio_info_t; +}; typedef enum { BTYPE_2D = 0, @@ -220,33 +220,33 @@ typedef enum { BTYPE_NONE = 0x7fffffff } BATCH_TYPE; -typedef struct xgi_cmd_info_s { +struct xgi_cmd_info { BATCH_TYPE _firstBeginType; U32 _firstBeginAddr; U32 _firstSize; U32 _curDebugID; U32 _lastBeginAddr; U32 _beginCount; -} xgi_cmd_info_t; +}; -typedef struct xgi_state_info_s { +struct xgi_state_info { U32 _fromState; U32 _toState; -} xgi_state_info_t; +}; -typedef struct cpu_info_s { +struct cpu_info { U32 _eax; U32 _ebx; U32 _ecx; U32 _edx; -} cpu_info_t; +}; -typedef struct xgi_mem_pid_s { +struct 
xgi_mem_pid { struct list_head list; - xgi_mem_location_t location; + enum xgi_mem_location location; unsigned long bus_addr; unsigned long pid; -} xgi_mem_pid_t; +}; /* * Ioctl definitions @@ -278,32 +278,32 @@ typedef struct xgi_mem_pid_s { #define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) #define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) -#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t) +#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, struct xgi_chip_info) #define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) #define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) -#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_req) #define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) #define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_req) #define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) -#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t) -#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t) +#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info) +#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, struct xgi_screen_info) #define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) -#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t) +#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, struct xgi_sarea_info) #define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) #define 
XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) -#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t) +#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, struct xgi_mmio_info) -#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t) +#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) #define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) -#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t) +#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info) #define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) -#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t) +#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, struct cpu_info) #define XGI_IOCTL_MAXNR 30 /* @@ -338,28 +338,28 @@ typedef struct xgi_mem_pid_s { (((offset) >= (info)->pcie.base) \ && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) -extern int xgi_fb_heap_init(xgi_info_t * info); -extern void xgi_fb_heap_cleanup(xgi_info_t * info); +extern int xgi_fb_heap_init(struct xgi_info * info); +extern void xgi_fb_heap_cleanup(struct xgi_info * info); -extern void xgi_fb_alloc(xgi_info_t * info, xgi_mem_req_t * req, - xgi_mem_alloc_t * alloc); -extern void xgi_fb_free(xgi_info_t * info, unsigned long offset); -extern void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt); +extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_req * req, + struct xgi_mem_alloc * alloc); +extern void xgi_fb_free(struct xgi_info * info, unsigned long offset); +extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt); -extern int xgi_pcie_heap_init(xgi_info_t * info); -extern void xgi_pcie_heap_cleanup(xgi_info_t * info); +extern int xgi_pcie_heap_init(struct xgi_info * 
info); +extern void xgi_pcie_heap_cleanup(struct xgi_info * info); -extern void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, - enum PcieOwner owner, xgi_mem_alloc_t * alloc); -extern void xgi_pcie_free(xgi_info_t * info, unsigned long offset); +extern void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, + enum PcieOwner owner, struct xgi_mem_alloc * alloc); +extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset); extern void xgi_pcie_heap_check(void); -extern struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, +extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, unsigned long address); -extern void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address); +extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address); -extern void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); -extern void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); +extern void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req); +extern void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req); -extern void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address); +extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address); #endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index fab99ae2..56cc589b 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -33,19 +33,19 @@ #define XGI_FB_HEAP_START 0x1000000 -static xgi_mem_heap_t *xgi_fb_heap; -static kmem_cache_t *xgi_fb_cache_block = NULL; +static struct xgi_mem_heap *xgi_fb_heap; +static struct kmem_cache *xgi_fb_cache_block = NULL; extern struct list_head xgi_mempid_list; -static xgi_mem_block_t *xgi_mem_new_node(void); -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, unsigned long size); -static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset); +static struct xgi_mem_block *xgi_mem_new_node(void); +static struct 
xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size); +static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset); -void xgi_fb_alloc(xgi_info_t * info, - xgi_mem_req_t * req, xgi_mem_alloc_t * alloc) +void xgi_fb_alloc(struct xgi_info * info, + struct xgi_mem_req * req, struct xgi_mem_alloc * alloc) { - xgi_mem_block_t *block; - xgi_mem_pid_t *mempid_block; + struct xgi_mem_block *block; + struct xgi_mem_pid *mempid_block; if (req->is_front) { alloc->location = LOCAL; @@ -74,7 +74,7 @@ void xgi_fb_alloc(xgi_info_t * info, /* manage mempid */ mempid_block = - kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); mempid_block->location = LOCAL; mempid_block->bus_addr = alloc->bus_addr; mempid_block->pid = alloc->pid; @@ -90,12 +90,12 @@ void xgi_fb_alloc(xgi_info_t * info, } } -void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr) +void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) { - xgi_mem_block_t *block; + struct xgi_mem_block *block; unsigned long offset = bus_addr - info->fb.base; - xgi_mem_pid_t *mempid_block; - xgi_mem_pid_t *mempid_freeblock = NULL; + struct xgi_mem_pid *mempid_block; + struct xgi_mem_pid *mempid_freeblock = NULL; struct list_head *mempid_list; if (offset < 0) { @@ -114,7 +114,7 @@ void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr) mempid_list = xgi_mempid_list.next; while (mempid_list != &xgi_mempid_list) { mempid_block = - list_entry(mempid_list, struct xgi_mem_pid_s, list); + list_entry(mempid_list, struct xgi_mem_pid, list); if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr) { mempid_freeblock = mempid_block; @@ -132,11 +132,11 @@ void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr) } } -int xgi_fb_heap_init(xgi_info_t * info) +int xgi_fb_heap_init(struct xgi_info * info) { - xgi_mem_block_t *block; + struct xgi_mem_block *block; - xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), 
GFP_KERNEL); + xgi_fb_heap = kmalloc(sizeof(struct xgi_mem_heap), GFP_KERNEL); if (!xgi_fb_heap) { XGI_ERROR("xgi_fb_heap alloc failed\n"); return 0; @@ -147,7 +147,7 @@ int xgi_fb_heap_init(xgi_info_t * info) INIT_LIST_HEAD(&xgi_fb_heap->sort_list); xgi_fb_cache_block = - kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), 0, + kmem_cache_create("xgi_fb_block", sizeof(struct xgi_mem_block), 0, SLAB_HWCACHE_ALIGN, NULL, NULL); if (NULL == xgi_fb_cache_block) { @@ -156,7 +156,7 @@ int xgi_fb_heap_init(xgi_info_t * info) } block = - (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, + (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); if (!block) { XGI_ERROR("kmem_cache_alloc failed\n"); @@ -190,10 +190,10 @@ int xgi_fb_heap_init(xgi_info_t * info) return 0; } -void xgi_fb_heap_cleanup(xgi_info_t * info) +void xgi_fb_heap_cleanup(struct xgi_info * info) { struct list_head *free_list, *temp; - xgi_mem_block_t *block; + struct xgi_mem_block *block; int i; if (xgi_fb_heap) { @@ -202,7 +202,7 @@ void xgi_fb_heap_cleanup(xgi_info_t * info) temp = free_list->next; while (temp != free_list) { block = - list_entry(temp, struct xgi_mem_block_s, + list_entry(temp, struct xgi_mem_block, list); temp = temp->next; @@ -225,12 +225,12 @@ void xgi_fb_heap_cleanup(xgi_info_t * info) } } -static xgi_mem_block_t *xgi_mem_new_node(void) +static struct xgi_mem_block *xgi_mem_new_node(void) { - xgi_mem_block_t *block; + struct xgi_mem_block *block; block = - (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, + (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL); if (!block) { XGI_ERROR("kmem_cache_alloc failed\n"); @@ -241,23 +241,23 @@ static xgi_mem_block_t *xgi_mem_new_node(void) } #if 0 -static void xgi_mem_insert_node_after(xgi_mem_list_t * list, - xgi_mem_block_t * current, - xgi_mem_block_t * block); -static void xgi_mem_insert_node_before(xgi_mem_list_t * list, - xgi_mem_block_t * current, - xgi_mem_block_t * block); 
-static void xgi_mem_insert_node_head(xgi_mem_list_t * list, - xgi_mem_block_t * block); -static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, - xgi_mem_block_t * block); -static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block); +static void xgi_mem_insert_node_after(struct xgi_mem_list * list, + struct xgi_mem_block * current, + struct xgi_mem_block * block); +static void xgi_mem_insert_node_before(struct xgi_mem_list * list, + struct xgi_mem_block * current, + struct xgi_mem_block * block); +static void xgi_mem_insert_node_head(struct xgi_mem_list * list, + struct xgi_mem_block * block); +static void xgi_mem_insert_node_tail(struct xgi_mem_list * list, + struct xgi_mem_block * block); +static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block); /* * insert node:block after node:current */ -static void xgi_mem_insert_node_after(xgi_mem_list_t * list, - xgi_mem_block_t * current, - xgi_mem_block_t * block) +static void xgi_mem_insert_node_after(struct xgi_mem_list * list, + struct xgi_mem_block * current, + struct xgi_mem_block * block) { block->prev = current; block->next = current->next; @@ -273,9 +273,9 @@ static void xgi_mem_insert_node_after(xgi_mem_list_t * list, /* * insert node:block before node:current */ -static void xgi_mem_insert_node_before(xgi_mem_list_t * list, - xgi_mem_block_t * current, - xgi_mem_block_t * block) +static void xgi_mem_insert_node_before(struct xgi_mem_list * list, + struct xgi_mem_block * current, + struct xgi_mem_block * block) { block->prev = current->prev; block->next = current; @@ -286,7 +286,7 @@ static void xgi_mem_insert_node_before(xgi_mem_list_t * list, block->prev->next = block; } } -void xgi_mem_insert_node_head(xgi_mem_list_t * list, xgi_mem_block_t * block) +void xgi_mem_insert_node_head(struct xgi_mem_list * list, struct xgi_mem_block * block) { block->next = list->head; block->prev = NULL; @@ -299,8 +299,8 @@ void xgi_mem_insert_node_head(xgi_mem_list_t * 
list, xgi_mem_block_t * block) list->head = block; } -static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, - xgi_mem_block_t * block) +static void xgi_mem_insert_node_tail(struct xgi_mem_list * list, + struct xgi_mem_block * block) { block->next = NULL; block->prev = list->tail; @@ -312,7 +312,7 @@ static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, list->tail = block; } -static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block) +static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block) { if (block == list->head) { list->head = block->next; @@ -331,11 +331,11 @@ static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block) block->next = block->prev = NULL; } #endif -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, +static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long originalSize) { struct list_head *free_list; - xgi_mem_block_t *block, *free_block, *used_block; + struct xgi_mem_block *block, *free_block, *used_block; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; @@ -358,7 +358,7 @@ static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, while (free_list != &xgi_fb_heap->free_list) { XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_mem_block_s, list); + block = list_entry(free_list, struct xgi_mem_block, list); if (size <= block->size) { break; } @@ -406,18 +406,18 @@ static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, return (used_block); } -static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset) +static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset) { struct list_head *free_list, *used_list; - xgi_mem_block_t *used_block = NULL, *block = NULL; - xgi_mem_block_t *prev, *next; + struct xgi_mem_block *used_block = NULL, *block = NULL; + struct xgi_mem_block *prev, *next; unsigned long upper; unsigned long lower; used_list = 
xgi_fb_heap->used_list.next; while (used_list != &xgi_fb_heap->used_list) { - block = list_entry(used_list, struct xgi_mem_block_s, list); + block = list_entry(used_list, struct xgi_mem_block, list); if (block->offset == offset) { break; } @@ -441,7 +441,7 @@ static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset) free_list = xgi_fb_heap->free_list.next; while (free_list != &xgi_fb_heap->free_list) { - block = list_entry(free_list, struct xgi_mem_block_s, list); + block = list_entry(free_list, struct xgi_mem_block, list); if (block->offset == upper) { next = block; diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h index ae078ae0..363c8bc8 100644 --- a/linux-core/xgi_fb.h +++ b/linux-core/xgi_fb.h @@ -29,42 +29,19 @@ #ifndef _XGI_FB_H_ #define _XGI_FB_H_ -typedef struct xgi_mem_block_s { +struct xgi_mem_block { struct list_head list; unsigned long offset; unsigned long size; atomic_t use_count; -} xgi_mem_block_t; +}; -typedef struct xgi_mem_heap_s { +struct xgi_mem_heap { struct list_head free_list; struct list_head used_list; struct list_head sort_list; unsigned long max_freesize; spinlock_t lock; -} xgi_mem_heap_t; - -#if 0 -typedef struct xgi_mem_block_s { - struct xgi_mem_block_s *next; - struct xgi_mem_block_s *prev; - unsigned long offset; - unsigned long size; - atomic_t use_count; -} xgi_mem_block_t; - -typedef struct xgi_mem_list_s { - xgi_mem_block_t *head; - xgi_mem_block_t *tail; -} xgi_mem_list_t; - -typedef struct xgi_mem_heap_s { - xgi_mem_list_t *free_list; - xgi_mem_list_t *used_list; - xgi_mem_list_t *sort_list; - unsigned long max_freesize; - spinlock_t lock; -} xgi_mem_heap_t; -#endif +}; #endif diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 465feb3c..2602b0f5 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -415,10 +415,10 @@ static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) free_pages(ptr, order); \ } -typedef struct xgi_pte_s { +struct xgi_pte { unsigned long 
phys_addr; unsigned long virt_addr; -} xgi_pte_t; +}; /* * AMD Athlon processors expose a subtle bug in the Linux @@ -427,12 +427,12 @@ typedef struct xgi_pte_s { * 2.4.20 is the first kernel to address it properly. The * page_attr API provides the means to solve the problem. */ -static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t * page_ptr) +static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(struct xgi_pte * page_ptr) { struct page *page = virt_to_page(__va(page_ptr->phys_addr)); change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); } -static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) +static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr) { struct page *page = virt_to_page(__va(page_ptr->phys_addr)); change_page_attr(page, 1, PAGE_KERNEL); @@ -453,20 +453,16 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) #define XGILockPage(page) SetPageLocked(page) #define XGIUnlockPage(page) ClearPageLocked(page) -/* - * hide a pointer to struct xgi_info_t in a file-private info - */ - -typedef struct { - void *info; +struct xgi_file_private { + struct xgi_info *info; U32 num_events; spinlock_t fp_lock; wait_queue_head_t wait_queue; -} xgi_file_private_t; +}; #define FILE_PRIVATE(filp) ((filp)->private_data) -#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp)) +#define XGI_GET_FP(filp) ((struct xgi_file_private *) FILE_PRIVATE(filp)) /* for the card devices */ #define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 8d0e81b6..68c5ca20 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -32,7 +32,7 @@ #include "xgi_regs.h" #include "xgi_pcie.h" -void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req) +void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req) { req->device_id = info->device_id; req->device_name[0] = 'x'; @@ -46,13 +46,13 @@ void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t 
* req) req->sarea_size = info->sarea_info.size; } -void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req) +void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req) { req->mmioBase = (void *)info->mmio.base; req->size = info->mmio.size; } -void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req) { info->scrn_info.scrn_start = req->scrn_start; info->scrn_info.scrn_xres = req->scrn_xres; @@ -71,7 +71,7 @@ void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req) info->scrn_info.scrn_bpp, info->scrn_info.scrn_pitch); } -void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req) { req->scrn_start = info->scrn_info.scrn_start; req->scrn_xres = info->scrn_info.scrn_xres; @@ -89,13 +89,13 @@ void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req) req->scrn_yres, req->scrn_bpp, req->scrn_pitch); } -void xgi_ge_reset(xgi_info_t * info) +void xgi_ge_reset(struct xgi_info * info) { xgi_disable_ge(info); xgi_enable_ge(info); } -void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req) +void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) { info->sarea_info.bus_addr = req->bus_addr; info->sarea_info.size = req->size; @@ -111,7 +111,7 @@ void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req) static U32 s_invalid_begin = 0; -BOOL xgi_ge_irq_handler(xgi_info_t * info) +BOOL xgi_ge_irq_handler(struct xgi_info * info) { volatile U8 *mmio_vbase = info->mmio.vbase; volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); @@ -287,7 +287,7 @@ BOOL xgi_ge_irq_handler(xgi_info_t * info) return FALSE; } -BOOL xgi_crt_irq_handler(xgi_info_t * info) +BOOL xgi_crt_irq_handler(struct xgi_info * info) { BOOL ret = FALSE; U8 save_3ce = bReadReg(0x3ce); @@ -311,7 +311,7 @@ BOOL xgi_crt_irq_handler(xgi_info_t * info) return 
(ret); } -BOOL xgi_dvi_irq_handler(xgi_info_t * info) +BOOL xgi_dvi_irq_handler(struct xgi_info * info) { BOOL ret = FALSE; U8 save_3ce = bReadReg(0x3ce); @@ -344,7 +344,7 @@ BOOL xgi_dvi_irq_handler(xgi_info_t * info) return (ret); } -void xgi_dump_register(xgi_info_t * info) +void xgi_dump_register(struct xgi_info * info) { int i, j; unsigned char temp; @@ -518,13 +518,13 @@ void xgi_dump_register(xgi_info_t * info) } } -void xgi_restore_registers(xgi_info_t * info) +void xgi_restore_registers(struct xgi_info * info) { bOut3x5(0x13, 0); bOut3x5(0x8b, 2); } -void xgi_waitfor_pci_idle(xgi_info_t * info) +void xgi_waitfor_pci_idle(struct xgi_info * info) { #define WHOLD_GE_STATUS 0x2800 #define IDLE_MASK ~0x90200000 @@ -539,7 +539,7 @@ void xgi_waitfor_pci_idle(xgi_info_t * info) } } -int xgi_get_cpu_id(struct cpu_info_s *arg) +int xgi_get_cpu_id(struct cpu_info *arg) { int op = arg->_eax; __asm__("cpuid":"=a"(arg->_eax), @@ -554,9 +554,9 @@ int xgi_get_cpu_id(struct cpu_info_s *arg) /*memory collect function*/ extern struct list_head xgi_mempid_list; -void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt) +void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt) { - xgi_mem_pid_t *mempid_block; + struct xgi_mem_pid *mempid_block; struct list_head *mempid_list; struct task_struct *p, *find; unsigned int cnt = 0; @@ -565,7 +565,7 @@ void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt) while (mempid_list != &xgi_mempid_list) { mempid_block = - list_entry(mempid_list, struct xgi_mem_pid_s, list); + list_entry(mempid_list, struct xgi_mem_pid, list); mempid_list = mempid_list->next; find = NULL; diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 37120aaa..0ebbe7e8 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -29,19 +29,19 @@ #ifndef _XGI_MISC_H_ #define _XGI_MISC_H_ -extern void xgi_dump_register(xgi_info_t * info); -extern void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req); -extern void 
xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req); -extern void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req); -extern void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req); -extern void xgi_ge_reset(xgi_info_t * info); -extern void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req); -extern int xgi_get_cpu_id(struct cpu_info_s *arg); +extern void xgi_dump_register(struct xgi_info * info); +extern void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req); +extern void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req); +extern void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req); +extern void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req); +extern void xgi_ge_reset(struct xgi_info * info); +extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req); +extern int xgi_get_cpu_id(struct cpu_info *arg); -extern void xgi_restore_registers(xgi_info_t * info); -extern BOOL xgi_ge_irq_handler(xgi_info_t * info); -extern BOOL xgi_crt_irq_handler(xgi_info_t * info); -extern BOOL xgi_dvi_irq_handler(xgi_info_t * info); -extern void xgi_waitfor_pci_idle(xgi_info_t * info); +extern void xgi_restore_registers(struct xgi_info * info); +extern BOOL xgi_ge_irq_handler(struct xgi_info * info); +extern BOOL xgi_crt_irq_handler(struct xgi_info * info); +extern BOOL xgi_dvi_irq_handler(struct xgi_info * info); +extern void xgi_waitfor_pci_idle(struct xgi_info * info); #endif diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 1a4d8e12..a81dbe8b 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -33,11 +33,11 @@ #include "xgi_pcie.h" #include "xgi_misc.h" -static xgi_pcie_heap_t *xgi_pcie_heap = NULL; -static kmem_cache_t *xgi_pcie_cache_block = NULL; -static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL; -static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL; -static xgi_pcie_block_t 
*xgi_pcie_scratchpad_block = NULL; +static struct xgi_pcie_heap *xgi_pcie_heap = NULL; +static struct kmem_cache *xgi_pcie_cache_block = NULL; +static struct xgi_pcie_block *xgi_pcie_vertex_block = NULL; +static struct xgi_pcie_block *xgi_pcie_cmdlist_block = NULL; +static struct xgi_pcie_block *xgi_pcie_scratchpad_block = NULL; extern struct list_head xgi_mempid_list; static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) @@ -85,7 +85,7 @@ static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) free_pages(page_addr, page_order); } -static int xgi_pcie_lut_init(xgi_info_t * info) +static int xgi_pcie_lut_init(struct xgi_info * info) { unsigned char *page_addr = NULL; unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; @@ -214,7 +214,7 @@ static int xgi_pcie_lut_init(xgi_info_t * info) return 0; } -static void xgi_pcie_lut_cleanup(xgi_info_t * info) +static void xgi_pcie_lut_cleanup(struct xgi_info * info) { if (info->lut_base) { XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", @@ -225,10 +225,10 @@ static void xgi_pcie_lut_cleanup(xgi_info_t * info) } } -static xgi_pcie_block_t *xgi_pcie_new_node(void) +static struct xgi_pcie_block *xgi_pcie_new_node(void) { - xgi_pcie_block_t *block = - (xgi_pcie_block_t *) kmem_cache_alloc(xgi_pcie_cache_block, + struct xgi_pcie_block *block = + (struct xgi_pcie_block *) kmem_cache_alloc(xgi_pcie_cache_block, GFP_KERNEL); if (block == NULL) { return NULL; @@ -247,11 +247,11 @@ static xgi_pcie_block_t *xgi_pcie_new_node(void) return block; } -static void xgi_pcie_block_stuff_free(xgi_pcie_block_t * block) +static void xgi_pcie_block_stuff_free(struct xgi_pcie_block * block) { struct page *page; - xgi_page_block_t *page_block = block->page_block; - xgi_page_block_t *free_block; + struct xgi_page_block *page_block = block->page_block; + struct xgi_page_block *free_block; unsigned long page_count = 0; int i; @@ -285,9 +285,9 @@ static void 
xgi_pcie_block_stuff_free(xgi_pcie_block_t * block) } } -int xgi_pcie_heap_init(xgi_info_t * info) +int xgi_pcie_heap_init(struct xgi_info * info) { - xgi_pcie_block_t *block; + struct xgi_pcie_block *block; if (!xgi_pcie_lut_init(info)) { XGI_ERROR("xgi_pcie_lut_init failed\n"); @@ -295,7 +295,7 @@ int xgi_pcie_heap_init(xgi_info_t * info) } xgi_pcie_heap = - (xgi_pcie_heap_t *) kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL); + (struct xgi_pcie_heap *) kmalloc(sizeof(struct xgi_pcie_heap), GFP_KERNEL); if (!xgi_pcie_heap) { XGI_ERROR("xgi_pcie_heap alloc failed\n"); goto fail1; @@ -307,7 +307,7 @@ int xgi_pcie_heap_init(xgi_info_t * info) xgi_pcie_heap->max_freesize = info->pcie.size; xgi_pcie_cache_block = - kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), 0, + kmem_cache_create("xgi_pcie_block", sizeof(struct xgi_pcie_block), 0, SLAB_HWCACHE_ALIGN, NULL, NULL); if (NULL == xgi_pcie_cache_block) { @@ -315,7 +315,7 @@ int xgi_pcie_heap_init(xgi_info_t * info) goto fail2; } - block = (xgi_pcie_block_t *) xgi_pcie_new_node(); + block = (struct xgi_pcie_block *) xgi_pcie_new_node(); if (!block) { XGI_ERROR("xgi_pcie_new_node failed\n"); goto fail3; @@ -348,7 +348,7 @@ int xgi_pcie_heap_init(xgi_info_t * info) void xgi_pcie_heap_check(void) { struct list_head *useList, *temp; - xgi_pcie_block_t *block; + struct xgi_pcie_block *block; unsigned int ownerIndex; #ifdef XGI_DEBUG char *ownerStr[6] = @@ -360,7 +360,7 @@ void xgi_pcie_heap_check(void) temp = useList->next; XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); while (temp != useList) { - block = list_entry(temp, struct xgi_pcie_block_s, list); + block = list_entry(temp, struct xgi_pcie_block, list); if (block->owner == PCIE_2D) ownerIndex = 0; else if (block->owner > PCIE_3D_TEXTURE @@ -378,10 +378,10 @@ void xgi_pcie_heap_check(void) } } -void xgi_pcie_heap_cleanup(xgi_info_t * info) +void xgi_pcie_heap_cleanup(struct xgi_info * info) { struct list_head *free_list, *temp; - 
xgi_pcie_block_t *block; + struct xgi_pcie_block *block; int j; xgi_pcie_lut_cleanup(info); @@ -394,7 +394,7 @@ void xgi_pcie_heap_cleanup(xgi_info_t * info) while (temp != free_list) { block = - list_entry(temp, struct xgi_pcie_block_s, + list_entry(temp, struct xgi_pcie_block, list); XGI_INFO ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", @@ -421,13 +421,13 @@ void xgi_pcie_heap_cleanup(xgi_info_t * info) } } -static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, +static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, unsigned long originalSize, enum PcieOwner owner) { struct list_head *free_list; - xgi_pcie_block_t *block, *used_block, *free_block; - xgi_page_block_t *page_block, *prev_page_block; + struct xgi_pcie_block *block, *used_block, *free_block; + struct xgi_page_block *page_block, *prev_page_block; struct page *page; unsigned long page_order = 0, count = 0, index = 0; unsigned long page_addr = 0; @@ -482,7 +482,7 @@ static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, free_list = xgi_pcie_heap->free_list.next; while (free_list != &xgi_pcie_heap->free_list) { //XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_pcie_block_s, list); + block = list_entry(free_list, struct xgi_pcie_block, list); if (size <= block->size) { break; } @@ -543,12 +543,12 @@ static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, used_block->page_order); used_block->page_block = NULL; - //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL); - //if (!used_block->page_block) return NULL; + //used_block->page_block = (struct xgi_pages_block *)kmalloc(sizeof(struct xgi_pages_block), GFP_KERNEL); + //if (!used_block->page_block) return NULL; //used_block->page_block->next = NULL; used_block->page_table = - (xgi_pte_t *) kmalloc(sizeof(xgi_pte_t) * used_block->page_count, + (struct xgi_pte *) kmalloc(sizeof(struct xgi_pte) * used_block->page_count,
GFP_KERNEL); if (used_block->page_table == NULL) { goto fail; @@ -595,8 +595,8 @@ static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, if (page_block == NULL) { page_block = - (xgi_page_block_t *) - kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL); + (struct xgi_page_block *) + kmalloc(sizeof(struct xgi_page_block), GFP_KERNEL); if (!page_block) { XGI_ERROR ("Can't get memory for page_block! \n"); @@ -697,17 +697,17 @@ static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, return NULL; } -static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info, +static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, unsigned long offset) { struct list_head *free_list, *used_list; - xgi_pcie_block_t *used_block, *block = NULL; - xgi_pcie_block_t *prev, *next; + struct xgi_pcie_block *used_block, *block = NULL; + struct xgi_pcie_block *prev, *next; unsigned long upper, lower; used_list = xgi_pcie_heap->used_list.next; while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block_s, list); + block = list_entry(used_list, struct xgi_pcie_block, list); if (block->offset == offset) { break; } @@ -737,7 +737,7 @@ static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info, free_list = xgi_pcie_heap->free_list.next; while (free_list != &xgi_pcie_heap->free_list) { - block = list_entry(free_list, struct xgi_pcie_block_s, list); + block = list_entry(free_list, struct xgi_pcie_block, list); if (block->offset == upper) { next = block; } else if ((block->offset + block->size) == lower) { @@ -787,11 +787,11 @@ static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info, return (used_block); } -void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, - enum PcieOwner owner, xgi_mem_alloc_t * alloc) +void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, + enum PcieOwner owner, struct xgi_mem_alloc * alloc) { - xgi_pcie_block_t *block; - xgi_mem_pid_t *mempid_block; + struct xgi_pcie_block *block; + struct 
xgi_mem_pid *mempid_block; xgi_down(info->pcie_sem); block = xgi_pcie_mem_alloc(info, size, owner); @@ -819,7 +819,7 @@ void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, */ if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) { mempid_block = - kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); + kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); if (!mempid_block) XGI_ERROR("mempid_block alloc failed\n"); mempid_block->location = NON_LOCAL; @@ -837,12 +837,12 @@ void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, } } -void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) +void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) { - xgi_pcie_block_t *block; + struct xgi_pcie_block *block; unsigned long offset = bus_addr - info->pcie.base; - xgi_mem_pid_t *mempid_block; - xgi_mem_pid_t *mempid_freeblock = NULL; + struct xgi_mem_pid *mempid_block; + struct xgi_mem_pid *mempid_freeblock = NULL; struct list_head *mempid_list; char isvertex = 0; int processcnt; @@ -857,7 +857,7 @@ void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) mempid_list = xgi_mempid_list.next; while (mempid_list != &xgi_mempid_list) { mempid_block = - list_entry(mempid_list, struct xgi_mem_pid_s, list); + list_entry(mempid_list, struct xgi_mem_pid, list); if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF) { ++processcnt; @@ -884,7 +884,7 @@ void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) mempid_list = xgi_mempid_list.next; while (mempid_list != &xgi_mempid_list) { mempid_block = - list_entry(mempid_list, struct xgi_mem_pid_s, list); + list_entry(mempid_list, struct xgi_mem_pid, list); if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr))) { @@ -906,17 +906,17 @@ void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) * given a bus address, fid the pcie mem block * uses the bus address as the key. 
*/ -struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, - unsigned long address) +struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, + unsigned long address) { struct list_head *used_list; - xgi_pcie_block_t *block; + struct xgi_pcie_block *block; int i; used_list = xgi_pcie_heap->used_list.next; while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block_s, list); + block = list_entry(used_list, struct xgi_pcie_block, list); if (block->bus_addr == address) { return block; @@ -946,7 +946,7 @@ struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info, * Returns CPU virtual address. Assumes the CPU VAddr is continuous in not * the same block */ -void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) +void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) { struct list_head *used_list = xgi_pcie_heap->used_list.next; const unsigned long offset_in_page = address & (PAGE_SIZE - 1); @@ -956,8 +956,8 @@ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) used_list, address, PAGE_SIZE - 1, offset_in_page); while (used_list != &xgi_pcie_heap->used_list) { - xgi_pcie_block_t *block = - list_entry(used_list, struct xgi_pcie_block_s, list); + struct xgi_pcie_block *block = + list_entry(used_list, struct xgi_pcie_block, list); XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", block, block->hw_addr, block->size); @@ -987,19 +987,19 @@ void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) return NULL; } -void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req) { } -void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req) { } /* address -- GE hw address */ -void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address) +void xgi_test_rwinkernel(struct xgi_info * info, unsigned long 
address) { unsigned long *virtaddr = 0; if (address == 0) { diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h index 6e8e45b9..b66d6a28 100644 --- a/linux-core/xgi_pcie.h +++ b/linux-core/xgi_pcie.h @@ -33,15 +33,15 @@ #define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ #endif -typedef struct xgi_page_block_s { - struct xgi_page_block_s *next; +struct xgi_page_block { + struct xgi_page_block *next; unsigned long phys_addr; unsigned long virt_addr; unsigned long page_count; unsigned long page_order; -} xgi_page_block_t; +}; -typedef struct xgi_pcie_block_s { +struct xgi_pcie_block { struct list_head list; unsigned long offset; /* block's offset in pcie memory, begin from 0 */ unsigned long size; /* The block size. */ @@ -50,19 +50,19 @@ typedef struct xgi_pcie_block_s { unsigned long page_count; unsigned long page_order; - xgi_page_block_t *page_block; - xgi_pte_t *page_table; /* list of physical pages allocated */ + struct xgi_page_block *page_block; + struct xgi_pte *page_table; /* list of physical pages allocated */ atomic_t use_count; enum PcieOwner owner; unsigned long processID; -} xgi_pcie_block_t; +}; -typedef struct xgi_pcie_heap_s { +struct xgi_pcie_heap { struct list_head free_list; struct list_head used_list; struct list_head sort_list; unsigned long max_freesize; -} xgi_pcie_heap_t; +}; #endif diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 487a7e15..0e54e7d8 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -50,25 +50,25 @@ #endif /* Hardware access functions */ -static inline void OUT3C5B(xgi_info_t * info, u8 index, u8 data) +static inline void OUT3C5B(struct xgi_info * info, u8 index, u8 data) { OUTB(0x3C4, index); OUTB(0x3C5, data); } -static inline void OUT3X5B(xgi_info_t * info, u8 index, u8 data) +static inline void OUT3X5B(struct xgi_info * info, u8 index, u8 data) { OUTB(0x3D4, index); OUTB(0x3D5, data); } -static inline void OUT3CFB(xgi_info_t * info, u8 index, u8 data) +static inline void 
OUT3CFB(struct xgi_info * info, u8 index, u8 data) { OUTB(0x3CE, index); OUTB(0x3CF, data); } -static inline u8 IN3C5B(xgi_info_t * info, u8 index) +static inline u8 IN3C5B(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3C4, index); @@ -76,7 +76,7 @@ static inline u8 IN3C5B(xgi_info_t * info, u8 index) return data; } -static inline u8 IN3X5B(xgi_info_t * info, u8 index) +static inline u8 IN3X5B(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3D4, index); @@ -84,7 +84,7 @@ static inline u8 IN3X5B(xgi_info_t * info, u8 index) return data; } -static inline u8 IN3CFB(xgi_info_t * info, u8 index) +static inline u8 IN3CFB(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3CE, index); @@ -92,25 +92,25 @@ static inline u8 IN3CFB(xgi_info_t * info, u8 index) return data; } -static inline void OUT3C5W(xgi_info_t * info, u8 index, u16 data) +static inline void OUT3C5W(struct xgi_info * info, u8 index, u16 data) { OUTB(0x3C4, index); OUTB(0x3C5, data); } -static inline void OUT3X5W(xgi_info_t * info, u8 index, u16 data) +static inline void OUT3X5W(struct xgi_info * info, u8 index, u16 data) { OUTB(0x3D4, index); OUTB(0x3D5, data); } -static inline void OUT3CFW(xgi_info_t * info, u8 index, u8 data) +static inline void OUT3CFW(struct xgi_info * info, u8 index, u8 data) { OUTB(0x3CE, index); OUTB(0x3CF, data); } -static inline u8 IN3C5W(xgi_info_t * info, u8 index) +static inline u8 IN3C5W(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3C4, index); @@ -118,7 +118,7 @@ static inline u8 IN3C5W(xgi_info_t * info, u8 index) return data; } -static inline u8 IN3X5W(xgi_info_t * info, u8 index) +static inline u8 IN3X5W(struct xgi_info * info, u8 index) { volatile u8 data = 0; OUTB(0x3D4, index); @@ -126,7 +126,7 @@ static inline u8 IN3X5W(xgi_info_t * info, u8 index) return data; } -static inline u8 IN3CFW(xgi_info_t * info, u8 index) +static inline u8 IN3CFW(struct xgi_info * info, u8 index) { volatile u8 data = 0; 
OUTB(0x3CE, index); @@ -134,14 +134,14 @@ static inline u8 IN3CFW(xgi_info_t * info, u8 index) return data; } -static inline u8 readAttr(xgi_info_t * info, u8 index) +static inline u8 readAttr(struct xgi_info * info, u8 index) { INB(0x3DA); /* flip-flop to index */ OUTB(0x3C0, index); return INB(0x3C1); } -static inline void writeAttr(xgi_info_t * info, u8 index, u8 value) +static inline void writeAttr(struct xgi_info * info, u8 index, u8 value) { INB(0x3DA); /* flip-flop to index */ OUTB(0x3C0, index); @@ -151,7 +151,7 @@ static inline void writeAttr(xgi_info_t * info, u8 index, u8 value) /* * Graphic engine register (2d/3d) acessing interface */ -static inline void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data) +static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data) { /* Jong 05/25/2006 */ XGI_INFO("Jong-WriteRegDWord()-Begin \n"); @@ -165,31 +165,31 @@ static inline void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data) XGI_INFO("Jong-WriteRegDWord()-End \n"); } -static inline void WriteRegWord(xgi_info_t * info, u32 addr, u16 data) +static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data) { *(volatile u16 *)(info->mmio.vbase + addr) = (data); } -static inline void WriteRegByte(xgi_info_t * info, u32 addr, u8 data) +static inline void WriteRegByte(struct xgi_info * info, u32 addr, u8 data) { *(volatile u8 *)(info->mmio.vbase + addr) = (data); } -static inline u32 ReadRegDWord(xgi_info_t * info, u32 addr) +static inline u32 ReadRegDWord(struct xgi_info * info, u32 addr) { volatile u32 data; data = *(volatile u32 *)(info->mmio.vbase + addr); return data; } -static inline u16 ReadRegWord(xgi_info_t * info, u32 addr) +static inline u16 ReadRegWord(struct xgi_info * info, u32 addr) { volatile u16 data; data = *(volatile u16 *)(info->mmio.vbase + addr); return data; } -static inline u8 ReadRegByte(xgi_info_t * info, u32 addr) +static inline u8 ReadRegByte(struct xgi_info * info, u32 addr) { volatile u8 data; data = 
*(volatile u8 *)(info->mmio.vbase + addr); @@ -197,25 +197,25 @@ static inline u8 ReadRegByte(xgi_info_t * info, u32 addr) } #if 0 -extern void OUT3C5B(xgi_info_t * info, u8 index, u8 data); -extern void OUT3X5B(xgi_info_t * info, u8 index, u8 data); -extern void OUT3CFB(xgi_info_t * info, u8 index, u8 data); -extern u8 IN3C5B(xgi_info_t * info, u8 index); -extern u8 IN3X5B(xgi_info_t * info, u8 index); -extern u8 IN3CFB(xgi_info_t * info, u8 index); -extern void OUT3C5W(xgi_info_t * info, u8 index, u8 data); -extern void OUT3X5W(xgi_info_t * info, u8 index, u8 data); -extern void OUT3CFW(xgi_info_t * info, u8 index, u8 data); -extern u8 IN3C5W(xgi_info_t * info, u8 index); -extern u8 IN3X5W(xgi_info_t * info, u8 index); -extern u8 IN3CFW(xgi_info_t * info, u8 index); - -extern void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data); -extern void WriteRegWord(xgi_info_t * info, u32 addr, u16 data); -extern void WriteRegByte(xgi_info_t * info, u32 addr, u8 data); -extern u32 ReadRegDWord(xgi_info_t * info, u32 addr); -extern u16 ReadRegWord(xgi_info_t * info, u32 addr); -extern u8 ReadRegByte(xgi_info_t * info, u32 addr); +extern void OUT3C5B(struct xgi_info * info, u8 index, u8 data); +extern void OUT3X5B(struct xgi_info * info, u8 index, u8 data); +extern void OUT3CFB(struct xgi_info * info, u8 index, u8 data); +extern u8 IN3C5B(struct xgi_info * info, u8 index); +extern u8 IN3X5B(struct xgi_info * info, u8 index); +extern u8 IN3CFB(struct xgi_info * info, u8 index); +extern void OUT3C5W(struct xgi_info * info, u8 index, u8 data); +extern void OUT3X5W(struct xgi_info * info, u8 index, u8 data); +extern void OUT3CFW(struct xgi_info * info, u8 index, u8 data); +extern u8 IN3C5W(struct xgi_info * info, u8 index); +extern u8 IN3X5W(struct xgi_info * info, u8 index); +extern u8 IN3CFW(struct xgi_info * info, u8 index); + +extern void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data); +extern void WriteRegWord(struct xgi_info * info, u32 addr, u16 data); 
+extern void WriteRegByte(struct xgi_info * info, u32 addr, u8 data); +extern u32 ReadRegDWord(struct xgi_info * info, u32 addr); +extern u16 ReadRegWord(struct xgi_info * info, u32 addr); +extern u8 ReadRegByte(struct xgi_info * info, u32 addr); extern void EnableProtect(); extern void DisableProtect(); @@ -262,19 +262,19 @@ extern void DisableProtect(); #define wReadReg(addr) ReadRegWord(info, addr) #define bReadReg(addr) ReadRegByte(info, addr) -static inline void xgi_protect_all(xgi_info_t * info) +static inline void xgi_protect_all(struct xgi_info * info) { OUTB(0x3C4, 0x11); OUTB(0x3C5, 0x92); } -static inline void xgi_unprotect_all(xgi_info_t * info) +static inline void xgi_unprotect_all(struct xgi_info * info) { OUTB(0x3C4, 0x11); OUTB(0x3C5, 0x92); } -static inline void xgi_enable_mmio(xgi_info_t * info) +static inline void xgi_enable_mmio(struct xgi_info * info) { u8 protect = 0; @@ -294,7 +294,7 @@ static inline void xgi_enable_mmio(xgi_info_t * info) OUTB(0x3C5, protect); } -static inline void xgi_disable_mmio(xgi_info_t * info) +static inline void xgi_disable_mmio(struct xgi_info * info) { u8 protect = 0; @@ -312,7 +312,7 @@ static inline void xgi_disable_mmio(xgi_info_t * info) outb(protect, 0x3C5); } -static inline void xgi_enable_ge(xgi_info_t * info) +static inline void xgi_enable_ge(struct xgi_info * info) { unsigned char bOld3cf2a = 0; int wait = 0; @@ -350,7 +350,7 @@ static inline void xgi_enable_ge(xgi_info_t * info) bOut3cf(0x2a, bOld3cf2a); } -static inline void xgi_disable_ge(xgi_info_t * info) +static inline void xgi_disable_ge(struct xgi_info * info) { int wait = 0; @@ -378,25 +378,25 @@ static inline void xgi_disable_ge(xgi_info_t * info) bOut3x5(0x36, 0); } -static inline void xgi_enable_dvi_interrupt(xgi_info_t * info) +static inline void xgi_enable_dvi_interrupt(struct xgi_info * info) { Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 Out3cf(0x39, In3cf(0x39) | 
0x02); } -static inline void xgi_disable_dvi_interrupt(xgi_info_t * info) +static inline void xgi_disable_dvi_interrupt(struct xgi_info * info) { Out3cf(0x39, In3cf(0x39) & ~0x02); } -static inline void xgi_enable_crt1_interrupt(xgi_info_t * info) +static inline void xgi_enable_crt1_interrupt(struct xgi_info * info) { Out3cf(0x3d, In3cf(0x3d) | 0x04); Out3cf(0x3d, In3cf(0x3d) & ~0x04); Out3cf(0x3d, In3cf(0x3d) | 0x08); } -static inline void xgi_disable_crt1_interrupt(xgi_info_t * info) +static inline void xgi_disable_crt1_interrupt(struct xgi_info * info) { Out3cf(0x3d, In3cf(0x3d) & ~0x08); } -- cgit v1.2.3 From ba3173fa39e236eee9ce9abb60f1151492378811 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 16:35:36 -0700 Subject: Eliminate unused integer and float typedefs. --- linux-core/xgi_types.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 65ec498b..89804667 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -33,27 +33,17 @@ * Typedefs * ***************************************************************************/ -typedef unsigned char V8; /* "void": enumerated or multiple fields */ -typedef unsigned short V16; /* "void": enumerated or multiple fields */ typedef unsigned char U8; /* 0 to 255 */ typedef unsigned short U16; /* 0 to 65535 */ -typedef signed char S8; /* -128 to 127 */ -typedef signed short S16; /* -32768 to 32767 */ -typedef float F32; /* IEEE Single Precision (S1E8M23) */ -typedef double F64; /* IEEE Double Precision (S1E11M52) */ typedef unsigned long BOOL; /* * mainly for 64-bit linux, where long is 64 bits * and win9x, where int is 16 bit. 
*/ #if defined(vxworks) -typedef unsigned int V32; /* "void": enumerated or multiple fields */ typedef unsigned int U32; /* 0 to 4294967295 */ -typedef signed int S32; /* -2147483648 to 2147483647 */ #else -typedef unsigned long V32; /* "void": enumerated or multiple fields */ typedef unsigned long U32; /* 0 to 4294967295 */ -typedef signed long S32; /* -2147483648 to 2147483647 */ #endif #ifndef TRUE -- cgit v1.2.3 From 5da2a3c2d488983efed6f8433a304096e2bb75e8 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 16:37:01 -0700 Subject: Replace BOOL with bool. --- linux-core/xgi_misc.c | 14 +++++++------- linux-core/xgi_misc.h | 6 +++--- linux-core/xgi_types.h | 1 - 3 files changed, 10 insertions(+), 11 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 68c5ca20..280e69f1 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -111,13 +111,13 @@ void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) static U32 s_invalid_begin = 0; -BOOL xgi_ge_irq_handler(struct xgi_info * info) +bool xgi_ge_irq_handler(struct xgi_info * info) { volatile U8 *mmio_vbase = info->mmio.vbase; volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); U32 int_status = ge_3d_status[4]; // interrupt status U32 auto_reset_count = 0; - BOOL is_support_auto_reset = FALSE; + bool is_support_auto_reset = FALSE; // Check GE on/off if (0 == (0xffffc0f0 & int_status)) { @@ -128,7 +128,7 @@ BOOL xgi_ge_irq_handler(struct xgi_info * info) ge_3d_status[0x04] = int_status | 0x04000000; if (TRUE == is_support_auto_reset) { - BOOL is_wrong_signal = FALSE; + bool is_wrong_signal = FALSE; static U32 last_int_tick_low, last_int_tick_high; static U32 new_int_tick_low; @@ -287,9 +287,9 @@ BOOL xgi_ge_irq_handler(struct xgi_info * info) return FALSE; } -BOOL xgi_crt_irq_handler(struct xgi_info * info) +bool xgi_crt_irq_handler(struct xgi_info * info) { - BOOL ret = FALSE; + bool ret = FALSE; U8 save_3ce = 
bReadReg(0x3ce); if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened @@ -311,9 +311,9 @@ BOOL xgi_crt_irq_handler(struct xgi_info * info) return (ret); } -BOOL xgi_dvi_irq_handler(struct xgi_info * info) +bool xgi_dvi_irq_handler(struct xgi_info * info) { - BOOL ret = FALSE; + bool ret = FALSE; U8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 0ebbe7e8..4b944c4c 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -39,9 +39,9 @@ extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req); extern int xgi_get_cpu_id(struct cpu_info *arg); extern void xgi_restore_registers(struct xgi_info * info); -extern BOOL xgi_ge_irq_handler(struct xgi_info * info); -extern BOOL xgi_crt_irq_handler(struct xgi_info * info); -extern BOOL xgi_dvi_irq_handler(struct xgi_info * info); +extern bool xgi_ge_irq_handler(struct xgi_info * info); +extern bool xgi_crt_irq_handler(struct xgi_info * info); +extern bool xgi_dvi_irq_handler(struct xgi_info * info); extern void xgi_waitfor_pci_idle(struct xgi_info * info); #endif diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 89804667..6d941abe 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -35,7 +35,6 @@ typedef unsigned char U8; /* 0 to 255 */ typedef unsigned short U16; /* 0 to 65535 */ -typedef unsigned long BOOL; /* * mainly for 64-bit linux, where long is 64 bits * and win9x, where int is 16 bit. -- cgit v1.2.3 From ec7730e5ba6ac1d60f90af483b3966d863cb5400 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 16:37:39 -0700 Subject: Eliminate unnecessary defines of TRUE and FALSE. 
--- linux-core/xgi_types.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 6d941abe..724f5f86 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -45,12 +45,4 @@ typedef unsigned int U32; /* 0 to 4294967295 */ typedef unsigned long U32; /* 0 to 4294967295 */ #endif -#ifndef TRUE -#define TRUE 1UL -#endif - -#ifndef FALSE -#define FALSE 0UL -#endif - #endif -- cgit v1.2.3 From 406ded3816300f6b3e945c932c44350b22f43bd9 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 16:41:32 -0700 Subject: Replace U(8|16) with u(8|16). --- linux-core/xgi_drv.h | 4 ++-- linux-core/xgi_misc.c | 38 +++++++++++++++++++------------------- linux-core/xgi_types.h | 2 -- 3 files changed, 21 insertions(+), 23 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 32ee5e81..8431eb16 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -96,7 +96,7 @@ struct xgi_aperture { U32 base; // pcie base is different from fb base U32 size; - U8 *vbase; + u8 *vbase; }; struct xgi_screen_info { @@ -120,7 +120,7 @@ struct xgi_info { int slot; int vendor_id; U32 device_id; - U8 revision_id; + u8 revision_id; /* physical characteristics */ struct xgi_aperture mmio; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 280e69f1..96ad12ee 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -113,7 +113,7 @@ static U32 s_invalid_begin = 0; bool xgi_ge_irq_handler(struct xgi_info * info) { - volatile U8 *mmio_vbase = info->mmio.vbase; + volatile u8 *mmio_vbase = info->mmio.vbase; volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); U32 int_status = ge_3d_status[4]; // interrupt status U32 auto_reset_count = 0; @@ -135,11 +135,11 @@ bool xgi_ge_irq_handler(struct xgi_info * info) static U32 continoue_int_count = 0; // OE II is busy. 
while (old_ge_status & 0x001c0000) { - U16 check; + u16 check; // Check Read back status *(mmio_vbase + 0x235c) = 0x80; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { @@ -149,7 +149,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // Check RO channel *(mmio_vbase + 0x235c) = 0x83; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { @@ -159,7 +159,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // Check RW channel *(mmio_vbase + 0x235c) = 0x88; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { @@ -169,7 +169,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // Check RO channel outstanding *(mmio_vbase + 0x235c) = 0x8f; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if (0 != (check & 0x3ff)) { is_wrong_signal = TRUE; @@ -178,7 +178,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // Check RW channel outstanding *(mmio_vbase + 0x235c) = 0x90; check = - *((volatile U16 *)(mmio_vbase + + *((volatile u16 *)(mmio_vbase + 0x2360)); if (0 != (check & 0x3ff)) { is_wrong_signal = TRUE; @@ -216,10 +216,10 @@ bool xgi_ge_irq_handler(struct xgi_info * info) ((--time_out) & 0xfff)) ; if (0 == time_out) { - U8 old_3ce; - U8 old_3cf; - U8 old_index; - U8 old_36; + u8 old_3ce; + u8 old_3cf; + u8 old_index; + u8 old_36; XGI_INFO ("Can not reset back 0x%lx!\n", @@ -290,12 +290,12 @@ bool xgi_ge_irq_handler(struct xgi_info * info) bool xgi_crt_irq_handler(struct xgi_info * info) { bool ret = FALSE; - U8 save_3ce = bReadReg(0x3ce); + u8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened { - U8 op3cf_3d; - U8 op3cf_37; + u8 op3cf_3d; + u8 op3cf_37; // What happened? 
op3cf_37 = bIn3cf(0x37); @@ -314,14 +314,14 @@ bool xgi_crt_irq_handler(struct xgi_info * info) bool xgi_dvi_irq_handler(struct xgi_info * info) { bool ret = FALSE; - U8 save_3ce = bReadReg(0x3ce); + u8 save_3ce = bReadReg(0x3ce); if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened { - U8 op3cf_39; - U8 op3cf_37; - U8 op3x5_5a; - U8 save_3x4 = bReadReg(0x3d4);; + u8 op3cf_39; + u8 op3cf_37; + u8 op3x5_5a; + u8 save_3x4 = bReadReg(0x3d4);; // What happened? op3cf_37 = bIn3cf(0x37); diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 724f5f86..f9a3360c 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -33,8 +33,6 @@ * Typedefs * ***************************************************************************/ -typedef unsigned char U8; /* 0 to 255 */ -typedef unsigned short U16; /* 0 to 65535 */ /* * mainly for 64-bit linux, where long is 64 bits * and win9x, where int is 16 bit. -- cgit v1.2.3 From 37733786582d04f072178949cc9e31225abf5577 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 20:49:21 -0700 Subject: Delete unused arrays s_emptyBegin and s_flush2D. --- linux-core/xgi_cmdlist.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 2cdf714f..b67a40f6 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -33,20 +33,6 @@ #include "xgi_misc.h" #include "xgi_cmdlist.h" -U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] = { - 0x10000000, // 3D Type Begin, Invalid - 0x80000004, // Length = 4; - 0x00000000, - 0x00000000 -}; - -U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = { - FLUSH_2D, - FLUSH_2D, - FLUSH_2D, - FLUSH_2D -}; - struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); -- cgit v1.2.3 From e206c4c59da0e81ed65796d543c311fc7e30b19a Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:00:50 -0700 Subject: Convert some PCI-e GART related variable to generic types. 
A few of the PCI-e GART related fields in struct xgi_info were hardcoded to u32. None of them need to be. Convert them to either unsigned int or bool. --- linux-core/xgi_drv.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 8431eb16..3cb6dc7f 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -131,10 +131,10 @@ struct xgi_info { /* look up table parameters */ U32 *lut_base; - U32 lutPageSize; - U32 lutPageOrder; - U32 isLUTInLFB; - U32 sdfbPageSize; + unsigned int lutPageSize; + unsigned int lutPageOrder; + bool isLUTInLFB; + unsigned int sdfbPageSize; U32 pcie_config; U32 pcie_status; -- cgit v1.2.3 From 4c4780bc8e5bf01b2b920c6b8de4ddbd0256c81f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:05:16 -0700 Subject: Stop-gap fix in xgi_submit_cmdlist Comment in the code explains it. Basically, I put an if-statement around a block of code to prevent a NULL pointer dereference that should never happen in the first place. Eventually, this will need to come out. 
--- linux-core/xgi_cmdlist.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index b67a40f6..f8aacea2 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -198,17 +198,24 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) (U32 *) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); - lastBatchVirtAddr[1] = - BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; - lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; - lastBatchVirtAddr[3] = 0; - //barrier(); - lastBatchVirtAddr[0] = - (beginPort << 22) + (BEGIN_VALID_MASK) + - (0xffff & pCmdInfo->_curDebugID); - - /* Jong 06/12/2006; system hang; marked for test */ - triggerHWCommandList(info, pCmdInfo->_beginCount); + /* lastBatchVirtAddr should *never* be NULL. However, there + * are currently some bugs that cause this to happen. The + * if-statement here prevents some fatal (i.e., hard lock + * requiring the reset button) oopses. + */ + if (lastBatchVirtAddr) { + lastBatchVirtAddr[1] = + BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; + lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; + lastBatchVirtAddr[3] = 0; + //barrier(); + lastBatchVirtAddr[0] = + (beginPort << 22) + (BEGIN_VALID_MASK) + + (0xffff & pCmdInfo->_curDebugID); + + /* Jong 06/12/2006; system hang; marked for test */ + triggerHWCommandList(info, pCmdInfo->_beginCount); + } XGI_INFO ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); -- cgit v1.2.3 From 4403540776c8ed3c2e28f26b6dacaab0b9e40e05 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:15:33 -0700 Subject: Clean up xgi_pcie_heap_check The whole purpose of xgi_pcie_heap_check is to log information about entries on the used_list. If XGI_DEBUG is not set, it doesn't print anything. Therefore we can #ifdef the whole function body. 
Convert open-code list iteration to use list_for_each_entry. --- linux-core/xgi_pcie.c | 42 +++++++++++++++++++----------------------- 1 file changed, 19 insertions(+), 23 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index a81dbe8b..dd758013 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -347,35 +347,31 @@ int xgi_pcie_heap_init(struct xgi_info * info) void xgi_pcie_heap_check(void) { - struct list_head *useList, *temp; +#ifdef XGI_DEBUG struct xgi_pcie_block *block; unsigned int ownerIndex; -#ifdef XGI_DEBUG - char *ownerStr[6] = + static const char *const ownerStr[6] = { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" }; -#endif - if (xgi_pcie_heap) { - useList = &xgi_pcie_heap->used_list; - temp = useList->next; - XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); - while (temp != useList) { - block = list_entry(temp, struct xgi_pcie_block, list); - if (block->owner == PCIE_2D) - ownerIndex = 0; - else if (block->owner > PCIE_3D_TEXTURE - || block->owner < PCIE_2D - || block->owner < PCIE_3D) - ownerIndex = 5; - else - ownerIndex = block->owner - PCIE_3D + 1; - XGI_INFO - ("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n", - ownerStr[ownerIndex], block->offset, block->size); - temp = temp->next; - } + if (!xgi_pcie_heap) { + return; + } + + XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); + list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { + if (block->owner == PCIE_2D) + ownerIndex = 0; + else if (block->owner > PCIE_3D_TEXTURE + || block->owner < PCIE_2D + || block->owner < PCIE_3D) + ownerIndex = 5; + else + ownerIndex = block->owner - PCIE_3D + 1; + XGI_INFO("Allocated by %s, block offset: 0x%lx, size: 0x%lx \n", + ownerStr[ownerIndex], block->offset, block->size); } +#endif } void xgi_pcie_heap_cleanup(struct xgi_info * info) -- cgit v1.2.3 From 32584d94e6ef7c0b463794a40541eb8183c7fb02 Mon Sep 17 00:00:00 2001 From: Ian Romanick 
Date: Fri, 29 Jun 2007 21:35:27 -0700 Subject: Convert open coded list iterators to either list_for_each_entry or list_for_each_entry_safe --- linux-core/xgi_fb.c | 43 +++++++---------------------- linux-core/xgi_misc.c | 39 ++++++++++++--------------- linux-core/xgi_pcie.c | 75 +++++++++++++-------------------------------------- 3 files changed, 45 insertions(+), 112 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 56cc589b..32fde5ab 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -96,7 +96,6 @@ void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) unsigned long offset = bus_addr - info->fb.base; struct xgi_mem_pid *mempid_block; struct xgi_mem_pid *mempid_freeblock = NULL; - struct list_head *mempid_list; if (offset < 0) { XGI_INFO("free onscreen frame buffer successfully !\n"); @@ -111,16 +110,12 @@ void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) } /* manage mempid */ - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) { - mempid_block = - list_entry(mempid_list, struct xgi_mem_pid, list); + list_for_each_entry(mempid_block, &xgi_mempid_list, list) { if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr) { mempid_freeblock = mempid_block; break; } - mempid_list = mempid_list->next; } if (mempid_freeblock) { list_del(&mempid_freeblock->list); @@ -192,20 +187,15 @@ int xgi_fb_heap_init(struct xgi_info * info) void xgi_fb_heap_cleanup(struct xgi_info * info) { - struct list_head *free_list, *temp; + struct list_head *free_list; struct xgi_mem_block *block; + struct xgi_mem_block *next; int i; if (xgi_fb_heap) { free_list = &xgi_fb_heap->free_list; for (i = 0; i < 3; i++, free_list++) { - temp = free_list->next; - while (temp != free_list) { - block = - list_entry(temp, struct xgi_mem_block, - list); - temp = temp->next; - + list_for_each_entry_safe(block, next, free_list, list) { XGI_INFO ("No. 
%d block->offset: 0x%lx block->size: 0x%lx \n", i, block->offset, block->size); @@ -334,7 +324,6 @@ static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long originalSize) { - struct list_head *free_list; struct xgi_mem_block *block, *free_block, *used_block; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; @@ -354,18 +343,14 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, return (NULL); } - free_list = xgi_fb_heap->free_list.next; - - while (free_list != &xgi_fb_heap->free_list) { + list_for_each_entry(block, &xgi_fb_heap->free_list, list) { XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_mem_block, list); if (size <= block->size) { break; } - free_list = free_list->next; } - if (free_list == &xgi_fb_heap->free_list) { + if (&block->list == &xgi_fb_heap->free_list) { XGI_ERROR ("Can't allocate %ldk size from frame buffer memory !\n", size / 1024); @@ -408,23 +393,19 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset) { - struct list_head *free_list, *used_list; - struct xgi_mem_block *used_block = NULL, *block = NULL; + struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; unsigned long upper; unsigned long lower; - used_list = xgi_fb_heap->used_list.next; - while (used_list != &xgi_fb_heap->used_list) { - block = list_entry(used_list, struct xgi_mem_block, list); + list_for_each_entry(block, &xgi_fb_heap->used_list, list) { if (block->offset == offset) { break; } - used_list = used_list->next; } - if (used_list == &xgi_fb_heap->used_list) { + if (&block->list == &xgi_fb_heap->used_list) { XGI_ERROR("can't find block: 0x%lx to free!\n", offset); return (NULL); } @@ -439,16 +420,12 @@ static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, 
unsigned long upper = used_block->offset + used_block->size; lower = used_block->offset; - free_list = xgi_fb_heap->free_list.next; - while (free_list != &xgi_fb_heap->free_list) { - block = list_entry(free_list, struct xgi_mem_block, list); - + list_for_each_entry(block, &xgi_fb_heap->free_list, list) { if (block->offset == upper) { next = block; } else if ((block->offset + block->size) == lower) { prev = block; } - free_list = free_list->next; } XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 96ad12ee..eecd717b 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -556,50 +556,45 @@ int xgi_get_cpu_id(struct cpu_info *arg) extern struct list_head xgi_mempid_list; void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt) { - struct xgi_mem_pid *mempid_block; - struct list_head *mempid_list; + struct xgi_mem_pid *block; + struct xgi_mem_pid *next; struct task_struct *p, *find; unsigned int cnt = 0; - mempid_list = xgi_mempid_list.next; - - while (mempid_list != &xgi_mempid_list) { - mempid_block = - list_entry(mempid_list, struct xgi_mem_pid, list); - mempid_list = mempid_list->next; + list_for_each_entry_safe(block, next, &xgi_mempid_list, list) { find = NULL; XGI_SCAN_PROCESS(p) { - if (p->pid == mempid_block->pid) { + if (p->pid == block->pid) { XGI_INFO ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", - mempid_block->pid, p->state, - mempid_block->location, - mempid_block->bus_addr); + block->pid, p->state, + block->location, + block->bus_addr); find = p; - if (mempid_block->bus_addr == 0xFFFFFFFF) + if (block->bus_addr == 0xFFFFFFFF) ++cnt; break; } } if (!find) { - if (mempid_block->location == LOCAL) { + if (block->location == LOCAL) { XGI_INFO ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! 
\n", - mempid_block->pid, mempid_block->bus_addr); - xgi_fb_free(info, mempid_block->bus_addr); - } else if (mempid_block->bus_addr != 0xFFFFFFFF) { + block->pid, block->bus_addr); + xgi_fb_free(info, block->bus_addr); + } else if (block->bus_addr != 0xFFFFFFFF) { XGI_INFO ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", - mempid_block->pid, mempid_block->bus_addr); - xgi_pcie_free(info, mempid_block->bus_addr); + block->pid, block->bus_addr); + xgi_pcie_free(info, block->bus_addr); } else { /*only delete the memory block */ - list_del(&mempid_block->list); + list_del(&block->list); XGI_INFO ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", - mempid_block->pid); - kfree(mempid_block); + block->pid); + kfree(block); } } } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index dd758013..e451ebd5 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -376,8 +376,9 @@ void xgi_pcie_heap_check(void) void xgi_pcie_heap_cleanup(struct xgi_info * info) { - struct list_head *free_list, *temp; + struct list_head *free_list; struct xgi_pcie_block *block; + struct xgi_pcie_block *next; int j; xgi_pcie_lut_cleanup(info); @@ -386,23 +387,16 @@ void xgi_pcie_heap_cleanup(struct xgi_info * info) if (xgi_pcie_heap) { free_list = &xgi_pcie_heap->free_list; for (j = 0; j < 3; j++, free_list++) { - temp = free_list->next; - - while (temp != free_list) { - block = - list_entry(temp, struct xgi_pcie_block, - list); + list_for_each_entry_safe(block, next, free_list, list) { XGI_INFO - ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + ("No. %d block offset: 0x%lx size: 0x%lx\n", j, block->offset, block->size); xgi_pcie_block_stuff_free(block); block->bus_addr = 0; block->hw_addr = 0; - temp = temp->next; //XGI_INFO("No. 
%d free block: 0x%p \n", j, block); kmem_cache_free(xgi_pcie_cache_block, block); - block = NULL; } } @@ -421,7 +415,6 @@ static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, unsigned long originalSize, enum PcieOwner owner) { - struct list_head *free_list; struct xgi_pcie_block *block, *used_block, *free_block; struct xgi_page_block *page_block, *prev_page_block; struct page *page; @@ -475,17 +468,13 @@ static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, } /* Jong 05/30/2006; find next free list which has enough space */ - free_list = xgi_pcie_heap->free_list.next; - while (free_list != &xgi_pcie_heap->free_list) { - //XGI_INFO("free_list: 0x%px \n", free_list); - block = list_entry(free_list, struct xgi_pcie_block, list); + list_for_each_entry(block, &xgi_pcie_heap->free_list, list) { if (size <= block->size) { break; } - free_list = free_list->next; } - if (free_list == &xgi_pcie_heap->free_list) { + if (&block->list == &xgi_pcie_heap->free_list) { XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", size / 1024); return (NULL); @@ -696,21 +685,17 @@ static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, unsigned long offset) { - struct list_head *free_list, *used_list; - struct xgi_pcie_block *used_block, *block = NULL; + struct xgi_pcie_block *used_block, *block; struct xgi_pcie_block *prev, *next; unsigned long upper, lower; - used_list = xgi_pcie_heap->used_list.next; - while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block, list); + list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { if (block->offset == offset) { break; } - used_list = used_list->next; } - if (used_list == &xgi_pcie_heap->used_list) { + if (&block->list == &xgi_pcie_heap->used_list) { XGI_ERROR("can't find block: 0x%lx to free!\n", offset); return (NULL); } @@ -730,16 +715,12 @@ static struct 
xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, upper = used_block->offset + used_block->size; lower = used_block->offset; - free_list = xgi_pcie_heap->free_list.next; - - while (free_list != &xgi_pcie_heap->free_list) { - block = list_entry(free_list, struct xgi_pcie_block, list); + list_for_each_entry(block, &xgi_pcie_heap->free_list, list) { if (block->offset == upper) { next = block; } else if ((block->offset + block->size) == lower) { prev = block; } - free_list = free_list->next; } XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); @@ -839,7 +820,6 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) unsigned long offset = bus_addr - info->pcie.base; struct xgi_mem_pid *mempid_block; struct xgi_mem_pid *mempid_freeblock = NULL; - struct list_head *mempid_list; char isvertex = 0; int processcnt; @@ -850,15 +830,12 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) if (isvertex) { /*check is there any other process using vertex */ processcnt = 0; - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) { - mempid_block = - list_entry(mempid_list, struct xgi_mem_pid, list); + + list_for_each_entry(mempid_block, &xgi_mempid_list, list) { if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF) { ++processcnt; } - mempid_list = mempid_list->next; } if (processcnt > 1) { return; @@ -877,17 +854,13 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) xgi_pcie_vertex_block = NULL; /* manage mempid */ - mempid_list = xgi_mempid_list.next; - while (mempid_list != &xgi_mempid_list) { - mempid_block = - list_entry(mempid_list, struct xgi_mem_pid, list); + list_for_each_entry(mempid_block, &xgi_mempid_list, list) { if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr))) { mempid_freeblock = mempid_block; break; } - mempid_list = mempid_list->next; } if (mempid_freeblock) 
{ list_del(&mempid_freeblock->list); @@ -905,15 +878,11 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, unsigned long address) { - struct list_head *used_list; struct xgi_pcie_block *block; int i; - used_list = xgi_pcie_heap->used_list.next; - - while (used_list != &xgi_pcie_heap->used_list) { - block = list_entry(used_list, struct xgi_pcie_block, list); + list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { if (block->bus_addr == address) { return block; } @@ -927,7 +896,6 @@ struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, } } } - used_list = used_list->next; } XGI_ERROR("could not find map for vm 0x%lx\n", address); @@ -944,17 +912,13 @@ struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, */ void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) { - struct list_head *used_list = xgi_pcie_heap->used_list.next; + struct xgi_pcie_block *block; const unsigned long offset_in_page = address & (PAGE_SIZE - 1); - XGI_INFO("begin (used_list = 0x%p, address = 0x%lx, " - "PAGE_SIZE - 1 = %lu, offset_in_page = %lu)\n", - used_list, address, PAGE_SIZE - 1, offset_in_page); - - while (used_list != &xgi_pcie_heap->used_list) { - struct xgi_pcie_block *block = - list_entry(used_list, struct xgi_pcie_block, list); + XGI_INFO("begin (address = 0x%lx, offset_in_page = %lu)\n", + address, offset_in_page); + list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", block, block->hw_addr, block->size); @@ -973,9 +937,6 @@ void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) XGI_INFO("return 0x%p\n", ret); return ret; - } else { - XGI_INFO("used_list = used_list->next;\n"); - used_list = used_list->next; } } -- cgit v1.2.3 From 49ccec1b0845ea14ab2cfd2f53704fe26e38fbef Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:38:48 -0700 Subject: 
Convert xgi_mem_location enum values to less generic names. --- linux-core/xgi_drv.h | 6 +++--- linux-core/xgi_fb.c | 10 +++++----- linux-core/xgi_misc.c | 2 +- linux-core/xgi_pcie.c | 10 +++++----- 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 3cb6dc7f..360e7120 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -158,9 +158,9 @@ struct xgi_ioctl_post_vbios { }; enum xgi_mem_location { - NON_LOCAL = 0, - LOCAL = 1, - INVALID = 0x7fffffff + XGI_MEMLOC_NON_LOCAL = 0, + XGI_MEMLOC_LOCAL = 1, + XGI_MEMLOC_INVALID = 0x7fffffff }; enum PcieOwner { diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 32fde5ab..d7e9285d 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -48,7 +48,7 @@ void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_pid *mempid_block; if (req->is_front) { - alloc->location = LOCAL; + alloc->location = XGI_MEMLOC_LOCAL; alloc->bus_addr = info->fb.base; alloc->hw_addr = 0; XGI_INFO @@ -59,7 +59,7 @@ void xgi_fb_alloc(struct xgi_info * info, xgi_up(info->fb_sem); if (block == NULL) { - alloc->location = LOCAL; + alloc->location = XGI_MEMLOC_LOCAL; alloc->size = 0; alloc->bus_addr = 0; alloc->hw_addr = 0; @@ -67,7 +67,7 @@ void xgi_fb_alloc(struct xgi_info * info, } else { XGI_INFO("Video RAM allocation succeeded: 0x%p\n", (char *)block->offset); - alloc->location = LOCAL; + alloc->location = XGI_MEMLOC_LOCAL; alloc->size = block->size; alloc->bus_addr = info->fb.base + block->offset; alloc->hw_addr = block->offset; @@ -75,7 +75,7 @@ void xgi_fb_alloc(struct xgi_info * info, /* manage mempid */ mempid_block = kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); - mempid_block->location = LOCAL; + mempid_block->location = XGI_MEMLOC_LOCAL; mempid_block->bus_addr = alloc->bus_addr; mempid_block->pid = alloc->pid; @@ -111,7 +111,7 @@ void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) /* manage mempid */ 
list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == LOCAL + if (mempid_block->location == XGI_MEMLOC_LOCAL && mempid_block->bus_addr == bus_addr) { mempid_freeblock = mempid_block; break; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index eecd717b..b7923228 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -578,7 +578,7 @@ void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt) } } if (!find) { - if (block->location == LOCAL) { + if (block->location == XGI_MEMLOC_LOCAL) { XGI_INFO ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", block->pid, block->bus_addr); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index e451ebd5..82111249 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -775,7 +775,7 @@ void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, xgi_up(info->pcie_sem); if (block == NULL) { - alloc->location = INVALID; + alloc->location = XGI_MEMLOC_INVALID; alloc->size = 0; alloc->bus_addr = 0; alloc->hw_addr = 0; @@ -784,7 +784,7 @@ void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, XGI_INFO ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", block->offset, block->bus_addr); - alloc->location = NON_LOCAL; + alloc->location = XGI_MEMLOC_NON_LOCAL; alloc->size = block->size; alloc->bus_addr = block->bus_addr; alloc->hw_addr = block->hw_addr; @@ -799,7 +799,7 @@ void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); if (!mempid_block) XGI_ERROR("mempid_block alloc failed\n"); - mempid_block->location = NON_LOCAL; + mempid_block->location = XGI_MEMLOC_NON_LOCAL; if (owner == PCIE_3D) mempid_block->bus_addr = 0xFFFFFFFF; /*xgi_pcie_vertex_block has the address */ else @@ -832,7 +832,7 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) processcnt = 0; list_for_each_entry(mempid_block, &xgi_mempid_list, list) { 
- if (mempid_block->location == NON_LOCAL + if (mempid_block->location == XGI_MEMLOC_NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF) { ++processcnt; } @@ -855,7 +855,7 @@ void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) /* manage mempid */ list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == NON_LOCAL + if (mempid_block->location == XGI_MEMLOC_NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr))) { mempid_freeblock = mempid_block; -- cgit v1.2.3 From fc37781dd30b53815dd71ce576eb2147d23f0914 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 29 Jun 2007 21:48:31 -0700 Subject: Convert a few more U32 variables to more appropriate, generic types. --- linux-core/xgi_cmdlist.c | 14 +++++++------- linux-core/xgi_cmdlist.h | 2 +- linux-core/xgi_linux.h | 2 +- linux-core/xgi_misc.c | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index f8aacea2..04ee6e82 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -36,11 +36,11 @@ struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); -static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); +static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter); static void xgi_cmdlist_reset(void); -int xgi_cmdlist_initialize(struct xgi_info * info, U32 size) +int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { //struct xgi_mem_req mem_req; struct xgi_mem_alloc mem_alloc; @@ -64,7 +64,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, U32 size) void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) { - U32 beginPort; + unsigned int beginPort; /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ /* Jong 05/25/2006 */ @@ -77,7 
+77,7 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) /* return; */ if (s_cmdring._lastBatchStartAddr == 0) { - U32 portOffset; + unsigned int portOffset; /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ @@ -278,17 +278,17 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter) { - static U32 s_triggerID = 1; + static unsigned int s_triggerID = 1; //Fix me, currently we just trigger one time while (triggerCounter--) { dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, - 0x05000000 + (0xffff & s_triggerID++)); + 0x05000000 + (0x0ffff & s_triggerID++)); // xgi_waitfor_pci_idle(info); } } -static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) +static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) { // Convert the batch type to begin port ID switch (pCmdInfo->_firstBeginType) { diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index b11511ff..c6221511 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -65,7 +65,7 @@ struct xgi_cmdring_info { U32 _cmdRingOffset; }; -extern int xgi_cmdlist_initialize(struct xgi_info * info, U32 size); +extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo); diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index 2602b0f5..99bf2d04 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -455,7 +455,7 @@ static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr) struct xgi_file_private { struct xgi_info *info; - U32 num_events; + unsigned int num_events; spinlock_t fp_lock; wait_queue_head_t wait_queue; }; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index b7923228..9bf8205b 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -109,7 +109,7 @@ void 
xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) */ #define STALL_INTERRUPT_RESET_THRESHOLD 0xffff -static U32 s_invalid_begin = 0; +static unsigned int s_invalid_begin = 0; bool xgi_ge_irq_handler(struct xgi_info * info) { -- cgit v1.2.3 From b323ab52aa9ccbfb06dd723ece361a5242d067b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Thu, 28 Jun 2007 14:45:26 -0400 Subject: Drop drm_drawable_list and add drm_drawable_info directly to the idr. --- linux-core/drmP.h | 4 ---- linux-core/drm_drawable.c | 41 +++++++++++++++++++++-------------------- 2 files changed, 21 insertions(+), 24 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index dd3a69df..7bcd095a 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -598,10 +598,6 @@ typedef struct ati_pcigart_info { int table_size; } drm_ati_pcigart_info; -struct drm_drawable_list { - drm_drawable_info_t info; -}; - #include "drm_objects.h" /** diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index eb44a189..74f0bb5d 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -44,19 +44,19 @@ int drm_adddraw(DRM_IOCTL_ARGS) { DRM_DEVICE; unsigned long irqflags; - struct drm_drawable_list *draw_info; + struct drm_drawable_info *draw_info; drm_draw_t draw; int new_id = 0; int ret; - draw_info = drm_calloc(1, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + draw_info = drm_calloc(1, sizeof(*draw_info), DRM_MEM_BUFS); if (!draw_info) return -ENOMEM; again: if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) { DRM_ERROR("Out of memory expanding drawable idr\n"); - drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + drm_free(draw_info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); return -ENOMEM; } @@ -86,7 +86,7 @@ int drm_rmdraw(DRM_IOCTL_ARGS) DRM_DEVICE; drm_draw_t draw; unsigned long irqflags; - struct drm_drawable_list *draw_info; + struct drm_drawable_info *draw_info; 
DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, sizeof(draw)); @@ -100,33 +100,31 @@ int drm_rmdraw(DRM_IOCTL_ARGS) spin_lock_irqsave(&dev->drw_lock, irqflags); idr_remove(&dev->drw_idr, draw.handle); - drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + drm_free(draw_info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); spin_unlock_irqrestore(&dev->drw_lock, irqflags); DRM_DEBUG("%d\n", draw.handle); return 0; } -int drm_update_drawable_info(DRM_IOCTL_ARGS) { +int drm_update_drawable_info(DRM_IOCTL_ARGS) +{ DRM_DEVICE; drm_update_draw_t update; unsigned long irqflags; - drm_drawable_info_t *info; drm_clip_rect_t *rects; - struct drm_drawable_list *draw_info; + struct drm_drawable_info *info; int err; DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, sizeof(update)); - draw_info = idr_find(&dev->drw_idr, update.handle); - if (!draw_info) { + info = idr_find(&dev->drw_idr, update.handle); + if (!info) { DRM_ERROR("No such drawable %d\n", update.handle); return DRM_ERR(EINVAL); } - info = &draw_info->info; - switch (update.type) { case DRM_DRAWABLE_CLIPRECTS: if (update.num != info->num_rects) { @@ -184,24 +182,27 @@ error: /** * Caller must hold the drawable spinlock! 
*/ -drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { - struct drm_drawable_list *draw_info; - draw_info = idr_find(&dev->drw_idr, id); - if (!draw_info) { +drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) +{ + struct drm_drawable_info *info; + + info = idr_find(&dev->drw_idr, id); + if (!info) { DRM_DEBUG("No such drawable %d\n", id); return NULL; } - return &draw_info->info; + return info; } EXPORT_SYMBOL(drm_get_drawable_info); static int drm_drawable_free(int idr, void *p, void *data) { - struct drm_drawable_list *drw_entry = p; - drm_free(drw_entry->info.rects, drw_entry->info.num_rects * + struct drm_drawable_info *info = p; + + drm_free(info->rects, info->num_rects * sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - drm_free(drw_entry, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); return 0; } -- cgit v1.2.3 From c9d752ff4fb2b6eee2fef636193fc9ca29abba37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Mon, 2 Jul 2007 17:52:07 -0400 Subject: Fix must-check warnings and implement a few error paths. 
--- linux-core/drm_drv.c | 2 +- linux-core/drm_stub.c | 26 +++++++++++++++++--------- linux-core/drm_sysfs.c | 31 +++++++++++++++++++++++++------ 3 files changed, 43 insertions(+), 16 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index d5eb9713..6bbe7fca 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -311,7 +311,7 @@ int drm_init(struct drm_driver *driver, } if (!drm_fb_loaded) - pci_register_driver(&driver->pci_driver); + return pci_register_driver(&driver->pci_driver); else { for (i = 0; pciidlist[i].vendor != 0; i++) { pid = &pciidlist[i]; diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index f57ed9cc..b96408ab 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -232,18 +232,22 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, if (!drm_fb_loaded) { pci_set_drvdata(pdev, dev); - pci_request_regions(pdev, driver->pci_driver.name); + ret = pci_request_regions(pdev, driver->pci_driver.name); + if (ret) + goto err_g1; } - pci_enable_device(pdev); + ret = pci_enable_device(pdev); + if (ret) + goto err_g2; pci_set_master(pdev); if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) { printk(KERN_ERR "DRM: fill_in_dev failed\n"); - goto err_g1; + goto err_g3; } if ((ret = drm_get_head(dev, &dev->primary))) - goto err_g1; + goto err_g3; DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, @@ -251,12 +255,16 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, return 0; -err_g1: - if (!drm_fb_loaded) { - pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); + err_g3: + if (!drm_fb_loaded) pci_disable_device(pdev); - } + err_g2: + if (!drm_fb_loaded) + pci_release_regions(pdev); + err_g1: + if (!drm_fb_loaded) + pci_set_drvdata(pdev, NULL); + drm_free(dev, sizeof(*dev), DRM_MEM_STUB); printk(KERN_ERR "DRM: drm_get_dev failed.\n"); return ret; diff --git 
a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index ace0778b..9b2f5dce 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -93,11 +93,15 @@ struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name) retval = class_register(&cs->class); if (retval) goto error; - class_create_file(&cs->class, &class_attr_version); + retval = class_create_file(&cs->class, &class_attr_version); + if (retval) + goto error_with_class; return cs; - error: + error_with_class: + class_unregister(&cs->class); + error: kfree(cs); return ERR_PTR(retval); } @@ -170,16 +174,31 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, if (retval) goto error; - class_device_create_file(&s_dev->class_dev, &cs->attr); + retval = class_device_create_file(&s_dev->class_dev, &cs->attr); + if (retval) + goto error_with_device; + class_set_devdata(&s_dev->class_dev, head); - for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) - class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]); + for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) { + retval = class_device_create_file(&s_dev->class_dev, + &class_device_attrs[i]); + if (retval) + goto error_with_files; + } return &s_dev->class_dev; -error: + error_with_files: + while (i > 0) + class_device_remove_file(&s_dev->class_dev, + &class_device_attrs[--i]); + class_device_remove_file(&s_dev->class_dev, &cs->attr); + error_with_device: + class_device_unregister(&s_dev->class_dev); + error: kfree(s_dev); + return ERR_PTR(retval); } -- cgit v1.2.3 From 8d96ba9805316b29e948d7594344feebb17042f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 3 Jul 2007 11:41:44 +0200 Subject: Restore pre-idr semantics for drawable information. There's a difference between a drawable ID not having valid drawable information and not being allocated at all. 
Not making the distinction would break i915 DRM swap scheduling with older X servers that don't push drawable cliprect information to the DRM. --- linux-core/drm_drawable.c | 43 +++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 18 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 74f0bb5d..7657e954 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -37,6 +37,8 @@ #include "drmP.h" +#define NO_DRW_INFO (void*)1 + /** * Allocate drawable ID and memory to store information about it. */ @@ -44,24 +46,18 @@ int drm_adddraw(DRM_IOCTL_ARGS) { DRM_DEVICE; unsigned long irqflags; - struct drm_drawable_info *draw_info; drm_draw_t draw; int new_id = 0; int ret; - draw_info = drm_calloc(1, sizeof(*draw_info), DRM_MEM_BUFS); - if (!draw_info) - return -ENOMEM; - again: if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) { DRM_ERROR("Out of memory expanding drawable idr\n"); - drm_free(draw_info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); return -ENOMEM; } spin_lock_irqsave(&dev->drw_lock, irqflags); - ret = idr_get_new_above(&dev->drw_idr, draw_info, 1, &new_id); + ret = idr_get_new_above(&dev->drw_idr, NO_DRW_INFO, 1, &new_id); if (ret == -EAGAIN) { spin_unlock_irqrestore(&dev->drw_lock, irqflags); goto again; @@ -86,21 +82,16 @@ int drm_rmdraw(DRM_IOCTL_ARGS) DRM_DEVICE; drm_draw_t draw; unsigned long irqflags; - struct drm_drawable_info *draw_info; DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, sizeof(draw)); - draw_info = idr_find(&dev->drw_idr, draw.handle); - if (!draw_info) { - DRM_DEBUG("No such drawable %d\n", draw.handle); - return -EINVAL; - } - spin_lock_irqsave(&dev->drw_lock, irqflags); + drm_free(drm_get_drawable_info(dev, draw.handle), + sizeof(struct drm_drawable_info), DRM_MEM_BUFS); + idr_remove(&dev->drw_idr, draw.handle); - drm_free(draw_info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); spin_unlock_irqrestore(&dev->drw_lock, 
irqflags); DRM_DEBUG("%d\n", draw.handle); @@ -125,6 +116,13 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) return DRM_ERR(EINVAL); } + if (info == NO_DRW_INFO) { + info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); + if (!info) + return -ENOMEM; + idr_replace(&dev->drw_idr, info, update.handle); + } + switch (update.type) { case DRM_DRAWABLE_CLIPRECTS: if (update.num != info->num_rects) { @@ -187,11 +185,17 @@ drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) struct drm_drawable_info *info; info = idr_find(&dev->drw_idr, id); + if (!info) { DRM_DEBUG("No such drawable %d\n", id); return NULL; } + if (info == NO_DRW_INFO) { + DRM_DEBUG("No information for drawable %d\n", id); + return NULL; + } + return info; } EXPORT_SYMBOL(drm_get_drawable_info); @@ -200,9 +204,12 @@ static int drm_drawable_free(int idr, void *p, void *data) { struct drm_drawable_info *info = p; - drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - drm_free(info, sizeof(struct drm_drawable_info), DRM_MEM_BUFS); + if (info != NO_DRW_INFO) { + drm_free(info->rects, info->num_rects * + sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + drm_free(info, sizeof(*info), DRM_MEM_BUFS); + } + return 0; } -- cgit v1.2.3 From ea832a8e555c9e1f90830b55cbd970d0eca0e2cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 3 Jul 2007 12:15:15 +0200 Subject: Simplification for previous commit. Dave Airlie pointed out on IRC that idr_replace lets us know if the ID hasn't been allocated, so we don't need a special pointer value for allocated IDs that don't have valid information yet. 
--- linux-core/drm_drawable.c | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 7657e954..57b62ca4 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -37,8 +37,6 @@ #include "drmP.h" -#define NO_DRW_INFO (void*)1 - /** * Allocate drawable ID and memory to store information about it. */ @@ -57,7 +55,7 @@ again: } spin_lock_irqsave(&dev->drw_lock, irqflags); - ret = idr_get_new_above(&dev->drw_idr, NO_DRW_INFO, 1, &new_id); + ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id); if (ret == -EAGAIN) { spin_unlock_irqrestore(&dev->drw_lock, irqflags); goto again; @@ -112,15 +110,15 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) info = idr_find(&dev->drw_idr, update.handle); if (!info) { - DRM_ERROR("No such drawable %d\n", update.handle); - return DRM_ERR(EINVAL); - } - - if (info == NO_DRW_INFO) { info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); if (!info) return -ENOMEM; - idr_replace(&dev->drw_idr, info, update.handle); + if (idr_replace(&dev->drw_idr, info, update.handle) == + (void*)-ENOENT) { + DRM_ERROR("No such drawable %d\n", update.handle); + drm_free(info, sizeof(*info), DRM_MEM_BUFS); + return -EINVAL; + } } switch (update.type) { @@ -182,21 +180,7 @@ error: */ drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { - struct drm_drawable_info *info; - - info = idr_find(&dev->drw_idr, id); - - if (!info) { - DRM_DEBUG("No such drawable %d\n", id); - return NULL; - } - - if (info == NO_DRW_INFO) { - DRM_DEBUG("No information for drawable %d\n", id); - return NULL; - } - - return info; + return idr_find(&dev->drw_idr, id); } EXPORT_SYMBOL(drm_get_drawable_info); @@ -204,7 +188,7 @@ static int drm_drawable_free(int idr, void *p, void *data) { struct drm_drawable_info *info = p; - if (info != NO_DRW_INFO) { + if (info) { drm_free(info->rects, info->num_rects * 
sizeof(drm_clip_rect_t), DRM_MEM_BUFS); drm_free(info, sizeof(*info), DRM_MEM_BUFS); -- cgit v1.2.3 From 1814a829eb65ee53a14fa9b53fc6f3a4196dcaa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 3 Jul 2007 10:31:46 -0400 Subject: Don't take dev->struct_mutex twice in drm_setsareactx. --- linux-core/drm_context.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 101a298c..ff08e809 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -245,8 +245,6 @@ int drm_setsareactx(struct inode *inode, struct file *filp, if (!map) goto bad; - mutex_lock(&dev->struct_mutex); - ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id); if (!ctx_sarea) goto bad; -- cgit v1.2.3 From d57b7f02d2e525e5600e5d77370d7ad2b4c9b265 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 3 Jul 2007 10:41:48 -0400 Subject: Use idr_replace trick to eliminate struct drm_ctx_sarea_list. 
--- linux-core/drmP.h | 4 ---- linux-core/drm_context.c | 41 +++++++---------------------------------- linux-core/drm_drawable.c | 3 +-- 3 files changed, 8 insertions(+), 40 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 7bcd095a..c992c8d9 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -570,10 +570,6 @@ typedef struct drm_ctx_list { drm_file_t *tag; /**< associated fd private data */ } drm_ctx_list_t; -struct drm_ctx_sarea_list { - drm_map_t *map; -}; - typedef struct drm_vbl_sig { struct list_head head; unsigned int sequence; diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index ff08e809..195c7fb5 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -58,17 +58,9 @@ */ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) { - struct drm_ctx_sarea_list *ctx; - mutex_lock(&dev->struct_mutex); - ctx = idr_find(&dev->ctx_idr, ctx_handle); - if (ctx) { - idr_remove(&dev->ctx_idr, ctx_handle); - drm_free(ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); - } else - DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle); + idr_remove(&dev->ctx_idr, ctx_handle); mutex_unlock(&dev->struct_mutex); - return; } /** @@ -84,20 +76,15 @@ static int drm_ctxbitmap_next(drm_device_t * dev) { int new_id; int ret; - struct drm_ctx_sarea_list *new_ctx; - - new_ctx = drm_calloc(1, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); - if (!new_ctx) - return -1; again: if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) { DRM_ERROR("Out of memory expanding drawable idr\n"); - drm_free(new_ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); return -ENOMEM; } mutex_lock(&dev->struct_mutex); - ret = idr_get_new_above(&dev->ctx_idr, new_ctx, DRM_RESERVED_CONTEXTS, &new_id); + ret = idr_get_new_above(&dev->ctx_idr, NULL, + DRM_RESERVED_CONTEXTS, &new_id); if (ret == -EAGAIN) { mutex_unlock(&dev->struct_mutex); goto again; @@ -120,15 +107,6 @@ int 
drm_ctxbitmap_init(drm_device_t * dev) return 0; } - - -static int drm_ctx_sarea_free(int id, void *p, void *data) -{ - struct drm_ctx_sarea_list *ctx_entry = p; - drm_free(ctx_entry, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); - return 0; -} - /** * Context bitmap cleanup. * @@ -140,7 +118,6 @@ static int drm_ctx_sarea_free(int id, void *p, void *data) void drm_ctxbitmap_cleanup(drm_device_t * dev) { mutex_lock(&dev->struct_mutex); - idr_for_each(&dev->ctx_idr, drm_ctx_sarea_free, NULL); idr_remove_all(&dev->ctx_idr); mutex_unlock(&dev->struct_mutex); } @@ -172,19 +149,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp, drm_ctx_priv_map_t request; drm_map_t *map; drm_map_list_t *_entry; - struct drm_ctx_sarea_list *ctx_sarea; if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; mutex_lock(&dev->struct_mutex); - ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id); - if (!ctx_sarea) { + map = idr_find(&dev->ctx_idr, request.ctx_id); + if (!map) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } - map = ctx_sarea->map; mutex_unlock(&dev->struct_mutex); @@ -224,7 +199,6 @@ int drm_setsareactx(struct inode *inode, struct file *filp, drm_ctx_priv_map_t request; drm_map_t *map = NULL; drm_map_list_t *r_list = NULL; - struct drm_ctx_sarea_list *ctx_sarea; if (copy_from_user(&request, (drm_ctx_priv_map_t __user *) arg, sizeof(request))) @@ -245,12 +219,11 @@ int drm_setsareactx(struct inode *inode, struct file *filp, if (!map) goto bad; - ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id); - if (!ctx_sarea) + if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id))) goto bad; - ctx_sarea->map = map; mutex_unlock(&dev->struct_mutex); + return 0; } diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 57b62ca4..7129980b 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -113,8 +113,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); if 
(!info) return -ENOMEM; - if (idr_replace(&dev->drw_idr, info, update.handle) == - (void*)-ENOENT) { + if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) { DRM_ERROR("No such drawable %d\n", update.handle); drm_free(info, sizeof(*info), DRM_MEM_BUFS); return -EINVAL; -- cgit v1.2.3 From 2695e8e209228dfc2e6a9b10bc118d0794602b37 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 5 Jul 2007 17:18:12 -0700 Subject: Convert weird rtdsc usage to get_cycles. I'm not convinced that get_cycles is the right approach here, but it's better than the weird way that rtdsc was being used. --- linux-core/xgi_misc.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 9bf8205b..a0ed18c2 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -127,12 +127,10 @@ bool xgi_ge_irq_handler(struct xgi_info * info) // We got GE stall interrupt. ge_3d_status[0x04] = int_status | 0x04000000; - if (TRUE == is_support_auto_reset) { + if (is_support_auto_reset) { bool is_wrong_signal = FALSE; - static U32 last_int_tick_low, - last_int_tick_high; - static U32 new_int_tick_low; - static U32 continoue_int_count = 0; + static cycles_t last_tick; + static unsigned continue_int_count = 0; // OE II is busy. while (old_ge_status & 0x001c0000) { u16 check; @@ -190,19 +188,17 @@ bool xgi_ge_irq_handler(struct xgi_info * info) if (is_wrong_signal) { // Nothing but skip. 
- } else if (0 == continoue_int_count++) { - rdtsc(last_int_tick_low, - last_int_tick_high); + } else if (0 == continue_int_count++) { + last_tick = get_cycles(); } else { - rdtscl(new_int_tick_low); - if ((new_int_tick_low - - last_int_tick_low) > + const cycles_t new_tick = get_cycles(); + if ((new_tick - last_tick) > STALL_INTERRUPT_RESET_THRESHOLD) { - continoue_int_count = 0; - } else if (continoue_int_count >= 3) { + continue_int_count = 0; + } else if (continue_int_count >= 3) { int time_out; - continoue_int_count = 0; + continue_int_count = 0; // GE Hung up, need reset. XGI_INFO("Reset GE!\n"); -- cgit v1.2.3 From 8b18276458e93263d5d554f779227a906592ac74 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 5 Jul 2007 17:45:44 -0700 Subject: Major clean up of xgi_ge_irq_handler Two large blocks of code were moved out of this function into separate functions. This brought some much needed sanity to the indentation. Some dead variables were removed. --- linux-core/xgi_misc.c | 260 ++++++++++++++++++++++++-------------------- 1 file changed, 123 insertions(+), 137 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index a0ed18c2..6cc0f107 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -111,83 +111,136 @@ void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) static unsigned int s_invalid_begin = 0; +static bool xgi_validate_signal(volatile u8 *mmio_vbase) +{ + volatile u32 *const ge_3d_status = + (volatile u32 *)(mmio_vbase + 0x2800); + const u32 old_ge_status = ge_3d_status[0x00]; + + if (old_ge_status & 0x001c0000) { + u16 check; + + /* Check Read back status */ + *(mmio_vbase + 0x235c) = 0x80; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + + if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { + return FALSE; + } + + /* Check RO channel */ + *(mmio_vbase + 0x235c) = 0x83; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + if ((check & 0x0f) != ((check & 0xf0) >> 
4)) { + return FALSE; + } + + /* Check RW channel */ + *(mmio_vbase + 0x235c) = 0x88; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + if ((check & 0x0f) != ((check & 0xf0) >> 4)) { + return FALSE; + } + + /* Check RO channel outstanding */ + *(mmio_vbase + 0x235c) = 0x8f; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + if (0 != (check & 0x3ff)) { + return FALSE; + } + + /* Check RW channel outstanding */ + *(mmio_vbase + 0x235c) = 0x90; + check = *((volatile u16 *)(mmio_vbase + 0x2360)); + if (0 != (check & 0x3ff)) { + return FALSE; + } + + /* No pending PCIE request. GE stall. */ + } + + return TRUE; +} + + +static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) +{ + volatile u32 *const ge_3d_status = + (volatile u32 *)(mmio_vbase + 0x2800); + int time_out = 0xffff; + + *(mmio_vbase + 0xb057) = 8; + while (0 != (ge_3d_status[0x00] & 0xf0000000)) { + while (0 != ((--time_out) & 0xfff)) + /* empty */ ; + + if (0 == time_out) { + u8 old_3ce; + u8 old_3cf; + u8 old_index; + u8 old_36; + + XGI_INFO("Can not reset back 0x%x!\n", + ge_3d_status[0x00]); + + *(mmio_vbase + 0xb057) = 0; + + /* Have to use 3x5.36 to reset. 
*/ + /* Save and close dynamic gating */ + + old_3ce = *(mmio_vbase + 0x3ce); + *(mmio_vbase + 0x3ce) = 0x2a; + old_3cf = *(mmio_vbase + 0x3cf); + *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; + + /* Reset GE */ + old_index = *(mmio_vbase + 0x3d4); + *(mmio_vbase + 0x3d4) = 0x36; + old_36 = *(mmio_vbase + 0x3d5); + *(mmio_vbase + 0x3d5) = old_36 | 0x10; + + while (0 != ((--time_out) & 0xfff)) + /* empty */ ; + + *(mmio_vbase + 0x3d5) = old_36; + *(mmio_vbase + 0x3d4) = old_index; + + /* Restore dynamic gating */ + *(mmio_vbase + 0x3cf) = old_3cf; + *(mmio_vbase + 0x3ce) = old_3ce; + break; + } + } + + *(mmio_vbase + 0xb057) = 0; +} + + bool xgi_ge_irq_handler(struct xgi_info * info) { - volatile u8 *mmio_vbase = info->mmio.vbase; - volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); - U32 int_status = ge_3d_status[4]; // interrupt status - U32 auto_reset_count = 0; + volatile u8 *const mmio_vbase = info->mmio.vbase; + volatile u32 *const ge_3d_status = + (volatile u32 *)(mmio_vbase + 0x2800); + const u32 int_status = ge_3d_status[4]; bool is_support_auto_reset = FALSE; - // Check GE on/off + /* Check GE on/off */ if (0 == (0xffffc0f0 & int_status)) { - U32 old_ge_status = ge_3d_status[0x00]; - U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; + u32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; + if (0 != (0x1000 & int_status)) { - // We got GE stall interrupt. + /* We got GE stall interrupt. + */ ge_3d_status[0x04] = int_status | 0x04000000; if (is_support_auto_reset) { - bool is_wrong_signal = FALSE; static cycles_t last_tick; static unsigned continue_int_count = 0; - // OE II is busy. 
- while (old_ge_status & 0x001c0000) { - u16 check; - // Check Read back status - *(mmio_vbase + 0x235c) = 0x80; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if ((check & 0x3f) != - ((check & 0x3f00) >> 8)) { - is_wrong_signal = TRUE; - break; - } - // Check RO channel - *(mmio_vbase + 0x235c) = 0x83; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if ((check & 0x0f) != - ((check & 0xf0) >> 4)) { - is_wrong_signal = TRUE; - break; - } - // Check RW channel - *(mmio_vbase + 0x235c) = 0x88; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if ((check & 0x0f) != - ((check & 0xf0) >> 4)) { - is_wrong_signal = TRUE; - break; - } - // Check RO channel outstanding - *(mmio_vbase + 0x235c) = 0x8f; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if (0 != (check & 0x3ff)) { - is_wrong_signal = TRUE; - break; - } - // Check RW channel outstanding - *(mmio_vbase + 0x235c) = 0x90; - check = - *((volatile u16 *)(mmio_vbase + - 0x2360)); - if (0 != (check & 0x3ff)) { - is_wrong_signal = TRUE; - break; - } - // No pending PCIE request. GE stall. - break; - } - if (is_wrong_signal) { - // Nothing but skip. + /* OE II is busy. */ + + if (!xgi_validate_signal(mmio_vbase)) { + /* Nothing but skip. */ } else if (0 == continue_int_count++) { last_tick = get_cycles(); } else { @@ -196,90 +249,23 @@ bool xgi_ge_irq_handler(struct xgi_info * info) STALL_INTERRUPT_RESET_THRESHOLD) { continue_int_count = 0; } else if (continue_int_count >= 3) { - int time_out; - continue_int_count = 0; - // GE Hung up, need reset. + /* GE Hung up, need reset. */ XGI_INFO("Reset GE!\n"); - *(mmio_vbase + 0xb057) = 8; - time_out = 0xffff; - while (0 != - (ge_3d_status[0x00] & - 0xf0000000)) { - while (0 != - ((--time_out) & - 0xfff)) ; - if (0 == time_out) { - u8 old_3ce; - u8 old_3cf; - u8 old_index; - u8 old_36; - - XGI_INFO - ("Can not reset back 0x%lx!\n", - ge_3d_status - [0x00]); - *(mmio_vbase + - 0xb057) = 0; - // Have to use 3x5.36 to reset. 
- // Save and close dynamic gating - old_3ce = - *(mmio_vbase - + 0x3ce); - *(mmio_vbase + - 0x3ce) = 0x2a; - old_3cf = - *(mmio_vbase - + 0x3cf); - *(mmio_vbase + - 0x3cf) = - old_3cf & 0xfe; - // Reset GE - old_index = - *(mmio_vbase - + 0x3d4); - *(mmio_vbase + - 0x3d4) = 0x36; - old_36 = - *(mmio_vbase - + 0x3d5); - *(mmio_vbase + - 0x3d5) = - old_36 | 0x10; - while (0 != - ((--time_out) & 0xfff)) ; - *(mmio_vbase + - 0x3d5) = - old_36; - *(mmio_vbase + - 0x3d4) = - old_index; - // Restore dynamic gating - *(mmio_vbase + - 0x3cf) = - old_3cf; - *(mmio_vbase + - 0x3ce) = - old_3ce; - break; - } - } - *(mmio_vbase + 0xb057) = 0; - - // Increase Reset counter - auto_reset_count++; + xgi_ge_hang_reset(mmio_vbase); } } } - return TRUE; } else if (0 != (0x1 & int_status)) { s_invalid_begin++; ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; - return TRUE; } + + return TRUE; } + return FALSE; } -- cgit v1.2.3 From 86e75b7f7f64643c6ef2c0fef353b38753df8239 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 5 Jul 2007 17:49:13 -0700 Subject: Remove XGI_IOCTL_CPUID and associated cruft. 
--- linux-core/xgi_drv.c | 4 ---- linux-core/xgi_drv.h | 15 +++------------ linux-core/xgi_misc.c | 12 ------------ linux-core/xgi_misc.h | 1 - 4 files changed, 3 insertions(+), 29 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 44b003a8..081db19e 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -960,10 +960,6 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, XGI_INFO("Jong-xgi_state_change \n"); xgi_state_change(info, (struct xgi_state_info *) arg_copy); break; - case XGI_ESC_CPUID: - XGI_INFO("Jong-XGI_ESC_CPUID \n"); - xgi_get_cpu_id((struct cpu_info *)arg_copy); - break; default: XGI_INFO("Jong-xgi_ioctl_default \n"); status = -EINVAL; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 360e7120..248377aa 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -234,13 +234,6 @@ struct xgi_state_info { U32 _toState; }; -struct cpu_info { - U32 _eax; - U32 _ebx; - U32 _ecx; - U32 _edx; -}; - struct xgi_mem_pid { struct list_head list; enum xgi_mem_location location; @@ -275,8 +268,7 @@ struct xgi_mem_pid { #define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) #define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) #define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) -#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20) -#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21) +#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 20) #define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, struct xgi_chip_info) #define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) @@ -298,12 +290,11 @@ struct xgi_mem_pid { #define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) #define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, struct xgi_mmio_info) -#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) -#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) +#define 
XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) +#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) #define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info) #define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) -#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, struct cpu_info) #define XGI_IOCTL_MAXNR 30 /* diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 6cc0f107..9712241f 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -521,18 +521,6 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) } } -int xgi_get_cpu_id(struct cpu_info *arg) -{ - int op = arg->_eax; - __asm__("cpuid":"=a"(arg->_eax), - "=b"(arg->_ebx), - "=c"(arg->_ecx), "=d"(arg->_edx) - : "0"(op)); - - XGI_INFO - ("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n", - op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx); -} /*memory collect function*/ extern struct list_head xgi_mempid_list; diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 4b944c4c..85cfbf2b 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -36,7 +36,6 @@ extern void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * extern void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req); extern void xgi_ge_reset(struct xgi_info * info); extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req); -extern int xgi_get_cpu_id(struct cpu_info *arg); extern void xgi_restore_registers(struct xgi_info * info); extern bool xgi_ge_irq_handler(struct xgi_info * info); -- cgit v1.2.3 From c806bba4665bb369168ee0b453fa28e2e0bf2a5d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 5 Jul 2007 00:12:33 +1000 Subject: nouveau/nv50: Initial channel/object support Should be OK on G84 for a single channel, multiple channels *almost* work. Untested on G80. 
--- linux-core/Makefile.kernel | 3 ++- linux-core/nv04_instmem.c | 1 + linux-core/nv50_instmem.c | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) create mode 120000 linux-core/nv04_instmem.c create mode 120000 linux-core/nv50_instmem.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 478c4df0..be2641c8 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -27,7 +27,8 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ - nv40_graph.o nv50_graph.o + nv40_graph.o nv50_graph.o \ + nv04_instmem.o nv50_instmem.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o sis-objs := sis_drv.o sis_mm.o ffb-objs := ffb_drv.o ffb_context.o diff --git a/linux-core/nv04_instmem.c b/linux-core/nv04_instmem.c new file mode 120000 index 00000000..e720fb5b --- /dev/null +++ b/linux-core/nv04_instmem.c @@ -0,0 +1 @@ +../shared-core/nv04_instmem.c \ No newline at end of file diff --git a/linux-core/nv50_instmem.c b/linux-core/nv50_instmem.c new file mode 120000 index 00000000..4e45344a --- /dev/null +++ b/linux-core/nv50_instmem.c @@ -0,0 +1 @@ +../shared-core/nv50_instmem.c \ No newline at end of file -- cgit v1.2.3 From 2f2d8b9688743ac6367bf13c3c023310a257ceb7 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 15:59:09 -0700 Subject: Merge xgi_mem_req and xgi_mem_alloc into a single type. These two structures were used as the request and reply for certain ioctls. Having a different type for an ioctl's input and output is just weird. In addition, each structure contained fields (e.g., pid) that had no business being there. This change requires updates to user-space. 
--- linux-core/xgi_cmdlist.c | 10 +++++----- linux-core/xgi_drv.c | 7 ++----- linux-core/xgi_drv.h | 37 +++++++++++++++++++------------------ linux-core/xgi_fb.c | 10 +++++----- linux-core/xgi_pcie.c | 25 ++++++++----------------- 5 files changed, 39 insertions(+), 50 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 04ee6e82..f7730d89 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -42,12 +42,12 @@ static void xgi_cmdlist_reset(void); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { - //struct xgi_mem_req mem_req; - struct xgi_mem_alloc mem_alloc; + struct xgi_mem_alloc mem_alloc = { + .size = size, + .owner = PCIE_2D, + }; - //mem_req.size = size; - - xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc); + xgi_pcie_alloc(info, &mem_alloc, 0); if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) { return -1; diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 081db19e..3608c747 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -894,7 +894,7 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_FB_ALLOC: XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); - xgi_fb_alloc(info, (struct xgi_mem_req *)arg_copy, alloc); + xgi_fb_alloc(info, alloc, 0); break; case XGI_ESC_FB_FREE: XGI_INFO("Jong-xgi_ioctl_fb_free \n"); @@ -906,8 +906,7 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, break; case XGI_ESC_PCIE_ALLOC: XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); - xgi_pcie_alloc(info, ((struct xgi_mem_req *) arg_copy)->size, - ((struct xgi_mem_req *) arg_copy)->owner, alloc); + xgi_pcie_alloc(info, alloc, 0); break; case XGI_ESC_PCIE_FREE: XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", @@ -945,8 +944,6 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, case XGI_ESC_DEBUG_INFO: XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); xgi_restore_registers(info); - //xgi_write_pcie_mem(info, (struct xgi_mem_req *) 
arg_copy); - //xgi_read_pcie_mem(info, (struct xgi_mem_req *) arg_copy); break; case XGI_ESC_SUBMIT_CMDLIST: XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 248377aa..361a1e96 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -177,19 +177,23 @@ enum PcieOwner { }; struct xgi_mem_req { - enum xgi_mem_location location; - unsigned long size; - unsigned long is_front; - enum PcieOwner owner; - unsigned long pid; }; struct xgi_mem_alloc { - enum xgi_mem_location location; - unsigned long size; + unsigned int location; + unsigned int size; + unsigned int is_front; + unsigned int owner; + + /** + * Address of the memory from the graphics hardware's point of view. + */ + u32 hw_addr; + + /** + * Physical address of the memory from the processor's point of view. + */ unsigned long bus_addr; - unsigned long hw_addr; - unsigned long pid; }; struct xgi_chip_info { @@ -274,11 +278,11 @@ struct xgi_mem_pid { #define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) #define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) -#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_req) +#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) #define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_req) +#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_alloc) #define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) #define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info) @@ -332,25 +336,22 @@ struct xgi_mem_pid { extern int xgi_fb_heap_init(struct xgi_info * info); extern void xgi_fb_heap_cleanup(struct xgi_info * info); -extern void 
xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_req * req, - struct xgi_mem_alloc * alloc); +extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + pid_t pid); extern void xgi_fb_free(struct xgi_info * info, unsigned long offset); extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt); extern int xgi_pcie_heap_init(struct xgi_info * info); extern void xgi_pcie_heap_cleanup(struct xgi_info * info); -extern void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, - enum PcieOwner owner, struct xgi_mem_alloc * alloc); +extern void xgi_pcie_alloc(struct xgi_info * info, + struct xgi_mem_alloc * alloc, pid_t pid); extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset); extern void xgi_pcie_heap_check(void); extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, unsigned long address); extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address); -extern void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req); -extern void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req); - extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address); #endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index d7e9285d..ac73b41a 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -41,13 +41,13 @@ static struct xgi_mem_block *xgi_mem_new_node(void); static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size); static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset); -void xgi_fb_alloc(struct xgi_info * info, - struct xgi_mem_req * req, struct xgi_mem_alloc * alloc) +void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + pid_t pid) { struct xgi_mem_block *block; struct xgi_mem_pid *mempid_block; - if (req->is_front) { + if (alloc->is_front) { alloc->location = XGI_MEMLOC_LOCAL; alloc->bus_addr = info->fb.base; alloc->hw_addr = 0; @@ 
-55,7 +55,7 @@ void xgi_fb_alloc(struct xgi_info * info, ("Video RAM allocation on front buffer successfully! \n"); } else { xgi_down(info->fb_sem); - block = xgi_mem_alloc(info, req->size); + block = xgi_mem_alloc(info, alloc->size); xgi_up(info->fb_sem); if (block == NULL) { @@ -77,7 +77,7 @@ void xgi_fb_alloc(struct xgi_info * info, kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); mempid_block->location = XGI_MEMLOC_LOCAL; mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = alloc->pid; + mempid_block->pid = pid; if (!mempid_block) XGI_ERROR("mempid_block alloc failed\n"); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 82111249..0f82e4ec 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -764,14 +764,13 @@ static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, return (used_block); } -void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, - enum PcieOwner owner, struct xgi_mem_alloc * alloc) +void xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + pid_t pid) { struct xgi_pcie_block *block; - struct xgi_mem_pid *mempid_block; xgi_down(info->pcie_sem); - block = xgi_pcie_mem_alloc(info, size, owner); + block = xgi_pcie_mem_alloc(info, alloc->size, alloc->owner); xgi_up(info->pcie_sem); if (block == NULL) { @@ -794,17 +793,18 @@ void xgi_pcie_alloc(struct xgi_info * info, unsigned long size, PCIE_3D request means a opengl process created. PCIE_3D_TEXTURE request means texture cannot alloc from fb. 
*/ - if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) { - mempid_block = + if ((alloc->owner == PCIE_3D) + || (alloc->owner == PCIE_3D_TEXTURE)) { + struct xgi_mem_pid *mempid_block = kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); if (!mempid_block) XGI_ERROR("mempid_block alloc failed\n"); mempid_block->location = XGI_MEMLOC_NON_LOCAL; - if (owner == PCIE_3D) + if (alloc->owner == PCIE_3D) mempid_block->bus_addr = 0xFFFFFFFF; /*xgi_pcie_vertex_block has the address */ else mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = alloc->pid; + mempid_block->pid = pid; XGI_INFO ("Memory ProcessID add one pcie block pid:%ld successfully! \n", @@ -944,15 +944,6 @@ void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) return NULL; } -void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req) -{ - -} - -void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req) -{ -} - /* address -- GE hw address */ -- cgit v1.2.3 From a3f56dc3d0620633c7719a01e6e578661d65edfc Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 16:07:27 -0700 Subject: Adjust the types of the fields of xgi_aperture. 
--- linux-core/xgi_drv.c | 10 ++++------ linux-core/xgi_drv.h | 6 +++--- 2 files changed, 7 insertions(+), 9 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 3608c747..2f0218e8 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -252,8 +252,7 @@ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); - info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base, - info->mmio.size); + info->mmio.vbase = ioremap_nocache(info->mmio.base, info->mmio.size); if (!info->mmio.vbase) { release_mem_region(info->mmio.base, info->mmio.size); XGI_ERROR("info->mmio.vbase failed\n"); @@ -282,8 +281,7 @@ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) goto error_disable_dev; } - info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base, - info->fb.size); + info->fb.vbase = ioremap_nocache(info->fb.base, info->fb.size); if (!info->fb.vbase) { @@ -1484,11 +1482,11 @@ void __exit xgi_exit_module(void) xgi_cmdlist_cleanup(&xgi_devices[i]); if (xgi_devices[i].fb.vbase != NULL) { - iounmap((void *)xgi_devices[i].fb.vbase); + iounmap(xgi_devices[i].fb.vbase); xgi_devices[i].fb.vbase = NULL; } if (xgi_devices[i].mmio.vbase != NULL) { - iounmap((void *)xgi_devices[i].mmio.vbase); + iounmap(xgi_devices[i].mmio.vbase); xgi_devices[i].mmio.vbase = NULL; } //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 361a1e96..6bd04cd9 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -94,9 +94,9 @@ #define XGI_CONTROL_DEVICE_NUMBER 100 struct xgi_aperture { - U32 base; // pcie base is different from fb base - U32 size; - u8 *vbase; + unsigned long base; + unsigned int size; + void *vbase; }; struct xgi_screen_info { -- cgit v1.2.3 From 
7268b65d5ce804713c12b8fadc42f9a086cdfe14 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 16:22:48 -0700 Subject: Correct types that are shared with user mode. --- linux-core/xgi_cmdlist.c | 6 +++-- linux-core/xgi_drv.h | 63 +++++++++++++++++++++--------------------------- linux-core/xgi_misc.c | 2 +- 3 files changed, 33 insertions(+), 38 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index f7730d89..ee53d30c 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -37,7 +37,8 @@ struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); -static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter); +static void triggerHWCommandList(struct xgi_info * info, + unsigned int triggerCounter); static void xgi_cmdlist_reset(void); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) @@ -276,7 +277,8 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) } } -static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter) +static void triggerHWCommandList(struct xgi_info * info, + unsigned int triggerCounter) { static unsigned int s_triggerID = 1; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 6bd04cd9..f1cfa44e 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -100,16 +100,16 @@ struct xgi_aperture { }; struct xgi_screen_info { - U32 scrn_start; - U32 scrn_xres; - U32 scrn_yres; - U32 scrn_bpp; - U32 scrn_pitch; + unsigned int scrn_start; + unsigned int scrn_xres; + unsigned int scrn_yres; + unsigned int scrn_bpp; + unsigned int scrn_pitch; }; struct xgi_sarea_info { - U32 bus_addr; - U32 size; + unsigned long bus_addr; + unsigned int size; }; struct xgi_info { @@ -153,8 +153,8 @@ struct xgi_info { }; struct xgi_ioctl_post_vbios { - U32 bus; - U32 slot; + unsigned int bus; + unsigned int slot; }; enum xgi_mem_location { @@ 
-176,9 +176,6 @@ enum PcieOwner { PCIE_INVALID = 0x7fffffff }; -struct xgi_mem_req { -}; - struct xgi_mem_alloc { unsigned int location; unsigned int size; @@ -197,45 +194,41 @@ struct xgi_mem_alloc { }; struct xgi_chip_info { - U32 device_id; - char device_name[32]; - U32 vendor_id; - U32 curr_display_mode; //Singe, DualView(Contained), MHS - U32 fb_size; - U32 sarea_bus_addr; - U32 sarea_size; -}; + u16 device_id; + u16 vendor_id; -struct xgi_opengl_cmd { - U32 cmd; + char device_name[32]; + unsigned int curr_display_mode; //Singe, DualView(Contained), MHS + unsigned int fb_size; + unsigned long sarea_bus_addr; + unsigned int sarea_size; }; struct xgi_mmio_info { - struct xgi_opengl_cmd cmd_head; - void *mmioBase; - int size; + unsigned long mmio_base; + unsigned int size; }; -typedef enum { +enum xgi_batch_type { BTYPE_2D = 0, BTYPE_3D = 1, BTYPE_FLIP = 2, BTYPE_CTRL = 3, BTYPE_NONE = 0x7fffffff -} BATCH_TYPE; +}; struct xgi_cmd_info { - BATCH_TYPE _firstBeginType; - U32 _firstBeginAddr; - U32 _firstSize; - U32 _curDebugID; - U32 _lastBeginAddr; - U32 _beginCount; + unsigned int _firstBeginType; + u32 _firstBeginAddr; + u32 _firstSize; + u32 _curDebugID; + u32 _lastBeginAddr; + unsigned int _beginCount; }; struct xgi_state_info { - U32 _fromState; - U32 _toState; + unsigned int _fromState; + unsigned int _toState; }; struct xgi_mem_pid { diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 9712241f..9c9fd38f 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -48,7 +48,7 @@ void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req) void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req) { - req->mmioBase = (void *)info->mmio.base; + req->mmio_base = info->mmio.base; req->size = info->mmio.size; } -- cgit v1.2.3 From 1f4e24b429789710f5d69fc78335f20c023569bb Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 16:33:14 -0700 Subject: Move types shared with user mode to xgi_drm.h. 
--- linux-core/xgi_drm.h | 1 + linux-core/xgi_drv.h | 131 +-------------------------------------------------- 2 files changed, 3 insertions(+), 129 deletions(-) create mode 120000 linux-core/xgi_drm.h (limited to 'linux-core') diff --git a/linux-core/xgi_drm.h b/linux-core/xgi_drm.h new file mode 120000 index 00000000..677586d7 --- /dev/null +++ b/linux-core/xgi_drm.h @@ -0,0 +1 @@ +../shared-core/xgi_drm.h \ No newline at end of file diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index f1cfa44e..803ed9c1 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -29,6 +29,8 @@ #ifndef _XGI_DRV_H_ #define _XGI_DRV_H_ +#include "xgi_drm.h" + #define XGI_MAJOR_VERSION 0 #define XGI_MINOR_VERSION 7 #define XGI_PATCHLEVEL 5 @@ -99,19 +101,6 @@ struct xgi_aperture { void *vbase; }; -struct xgi_screen_info { - unsigned int scrn_start; - unsigned int scrn_xres; - unsigned int scrn_yres; - unsigned int scrn_bpp; - unsigned int scrn_pitch; -}; - -struct xgi_sarea_info { - unsigned long bus_addr; - unsigned int size; -}; - struct xgi_info { struct pci_dev *dev; int flags; @@ -157,12 +146,6 @@ struct xgi_ioctl_post_vbios { unsigned int slot; }; -enum xgi_mem_location { - XGI_MEMLOC_NON_LOCAL = 0, - XGI_MEMLOC_LOCAL = 1, - XGI_MEMLOC_INVALID = 0x7fffffff -}; - enum PcieOwner { PCIE_2D = 0, /* @@ -176,61 +159,6 @@ enum PcieOwner { PCIE_INVALID = 0x7fffffff }; -struct xgi_mem_alloc { - unsigned int location; - unsigned int size; - unsigned int is_front; - unsigned int owner; - - /** - * Address of the memory from the graphics hardware's point of view. - */ - u32 hw_addr; - - /** - * Physical address of the memory from the processor's point of view. 
- */ - unsigned long bus_addr; -}; - -struct xgi_chip_info { - u16 device_id; - u16 vendor_id; - - char device_name[32]; - unsigned int curr_display_mode; //Singe, DualView(Contained), MHS - unsigned int fb_size; - unsigned long sarea_bus_addr; - unsigned int sarea_size; -}; - -struct xgi_mmio_info { - unsigned long mmio_base; - unsigned int size; -}; - -enum xgi_batch_type { - BTYPE_2D = 0, - BTYPE_3D = 1, - BTYPE_FLIP = 2, - BTYPE_CTRL = 3, - BTYPE_NONE = 0x7fffffff -}; - -struct xgi_cmd_info { - unsigned int _firstBeginType; - u32 _firstBeginAddr; - u32 _firstSize; - u32 _curDebugID; - u32 _lastBeginAddr; - unsigned int _beginCount; -}; - -struct xgi_state_info { - unsigned int _fromState; - unsigned int _toState; -}; - struct xgi_mem_pid { struct list_head list; enum xgi_mem_location location; @@ -238,61 +166,6 @@ struct xgi_mem_pid { unsigned long pid; }; -/* - * Ioctl definitions - */ - -#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */ - -#define XGI_IOCTL_BASE 0 -#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0) -#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1) - -#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2) -#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3) -#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4) -#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5) -#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6) -#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7) -#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8) -#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9) -#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10) -#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11) -#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12) -#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13) -#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14) -#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16) -#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17) -#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18) -#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19) -#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 20) - 
-#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, struct xgi_chip_info) -#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) - -#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) -#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_alloc) -#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) - -#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) -#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_alloc) -#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) - -#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info) -#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, struct xgi_screen_info) - -#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) -#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, struct xgi_sarea_info) -#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) -#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) -#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, struct xgi_mmio_info) - -#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info) -#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) -#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info) - -#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) -#define XGI_IOCTL_MAXNR 30 /* * flags -- cgit v1.2.3 From 5c481d0a4284ec7311a47fbeab1680d007769668 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 16:43:48 -0700 Subject: Eliminiate fields in xgi_info that are duplicates of fields in pci_dev. 
--- linux-core/xgi_drv.c | 51 ++++++++++++++------------------------------------- linux-core/xgi_drv.h | 10 ++-------- linux-core/xgi_misc.c | 4 ++-- 3 files changed, 18 insertions(+), 47 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 2f0218e8..c4cc8900 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -233,10 +233,6 @@ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) info = &xgi_devices[xgi_num_devices]; info->dev = dev; - info->vendor_id = dev->vendor; - info->device_id = dev->device; - info->bus = dev->bus->number; - info->slot = PCI_SLOT((dev)->devfn); xgi_lock_init(info); @@ -294,14 +290,13 @@ int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) info->fb.vbase = NULL; XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); - info->irq = dev->irq; /* check common error condition */ - if (info->irq == 0) { + if (info->dev->irq == 0) { XGI_ERROR("Can't find an IRQ for your XGI card! 
\n"); goto error_zero_dev; } - XGI_INFO("info->irq: %lx \n", info->irq); + XGI_INFO("info->irq: %lx \n", info->dev->irq); //xgi_enable_dvi_interrupt(info); @@ -568,21 +563,21 @@ int xgi_kern_open(struct inode *inode, struct file *filp) if (!(info->flags & XGI_FLAG_OPEN)) { XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); - if (info->device_id == 0) { + if (info->dev->device == 0) { XGI_INFO("open of nonexistent device %d\n", dev_num); result = -ENXIO; goto failed; } /* initialize struct irqaction */ - status = request_irq(info->irq, xgi_kern_isr, + status = request_irq(info->dev->irq, xgi_kern_isr, SA_INTERRUPT | SA_SHIRQ, "xgi", (void *)info); if (status != 0) { - if (info->irq && (status == -EBUSY)) { + if (info->dev->irq && (status == -EBUSY)) { XGI_ERROR ("Tried to get irq %d, but another driver", - (unsigned int)info->irq); + (unsigned int)info->dev->irq); XGI_ERROR("has it and is not sharing it.\n"); } XGI_ERROR("isr request failed 0x%x\n", status); @@ -651,7 +646,7 @@ int xgi_kern_release(struct inode *inode, struct file *filp) * Free the IRQ, which may block until all pending interrupt processing * has completed. 
*/ - free_irq(info->irq, (void *)info); + free_irq(info->dev->irq, (void *)info); xgi_cmdlist_cleanup(info); @@ -1064,21 +1059,6 @@ static u8 xgi_find_pcie_capability(struct pci_dev *dev) return 0; } -static struct pci_dev *xgi_get_pci_device(struct xgi_info * info) -{ - struct pci_dev *dev; - - dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL); - while (dev) { - if (XGI_PCI_SLOT_NUMBER(dev) == info->slot - && XGI_PCI_BUS_NUMBER(dev) == info->bus) - return dev; - dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev); - } - - return NULL; -} - int xgi_kern_read_card_info(char *page, char **start, off_t off, int count, int *eof, void *data) { @@ -1089,7 +1069,7 @@ int xgi_kern_read_card_info(char *page, char **start, off_t off, struct xgi_info *info; info = (struct xgi_info *) data; - dev = xgi_get_pci_device(info); + dev = info->dev; if (!dev) return 0; @@ -1162,13 +1142,10 @@ static void xgi_proc_create(void) xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; for (info = xgi_devices; info < xgi_max_devices; info++) { - if (info->device_id == 0) - break; - /* world readable file */ flags = S_IFREG | S_IRUGO; - dev = xgi_get_pci_device(info); + dev = info->dev; if (!dev) break; @@ -1314,19 +1291,19 @@ static void xgi_dev_init(struct xgi_info * info) for (dev = xgidev_list; dev->vendor; dev++) { if ((dev->vendor == pdev->vendor) && (dev->device == pdev->device)) { + u8 rev_id; + XGI_INFO("dev->vendor = pdev->vendor= %x \n", dev->vendor); XGI_INFO("dev->device = pdev->device= %x \n", dev->device); - xgi_devices[found].device_id = pdev->device; + xgi_devices[found].dev = pdev; pci_read_config_byte(pdev, PCI_REVISION_ID, - &xgi_devices[found]. 
- revision_id); + rev_id); - XGI_INFO("PCI_REVISION_ID= %x \n", - xgi_devices[found].revision_id); + XGI_INFO("PCI_REVISION_ID= %x \n", rev_id); pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 803ed9c1..efbbd647 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -105,11 +105,6 @@ struct xgi_info { struct pci_dev *dev; int flags; int device_number; - int bus; /* PCI config info */ - int slot; - int vendor_id; - U32 device_id; - u8 revision_id; /* physical characteristics */ struct xgi_aperture mmio; @@ -125,9 +120,8 @@ struct xgi_info { bool isLUTInLFB; unsigned int sdfbPageSize; - U32 pcie_config; - U32 pcie_status; - U32 irq; + u32 pcie_config; + u32 pcie_status; atomic_t use_count; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 9c9fd38f..d3385bef 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -34,12 +34,12 @@ void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req) { - req->device_id = info->device_id; + req->device_id = info->dev->device; req->device_name[0] = 'x'; req->device_name[1] = 'g'; req->device_name[2] = '4'; req->device_name[3] = '7'; - req->vendor_id = info->vendor_id; + req->vendor_id = info->dev->vendor; req->curr_display_mode = 0; req->fb_size = info->fb.size; req->sarea_bus_addr = info->sarea_info.bus_addr; -- cgit v1.2.3 From 76ca1e858fb8e1a65ea49c0c62350d7ca91044a2 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 9 Jul 2007 18:54:25 -0700 Subject: Convert occurances of U32 to other types. Most occurances of U32 were converted to u32. These are cases where the data represents something that will be written to the hardware. Other cases were converted to 'unsigned int'. U32 was the last type in xgi_types.h, so that file is removed. 
--- linux-core/xgi_cmdlist.c | 20 +++++++++----------- linux-core/xgi_cmdlist.h | 10 +++++----- linux-core/xgi_drv.c | 1 - linux-core/xgi_drv.h | 2 +- linux-core/xgi_fb.c | 1 - linux-core/xgi_misc.c | 1 - linux-core/xgi_pcie.c | 3 +-- linux-core/xgi_types.h | 46 ---------------------------------------------- 8 files changed, 16 insertions(+), 68 deletions(-) delete mode 100644 linux-core/xgi_types.h (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index ee53d30c..7be0ac48 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" @@ -183,7 +182,7 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ } else { - U32 *lastBatchVirtAddr; + u32 *lastBatchVirtAddr; XGI_INFO ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); @@ -195,9 +194,9 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) addFlush2D(info); } - lastBatchVirtAddr = - (U32 *) xgi_find_pcie_virt(info, - s_cmdring._lastBatchStartAddr); + lastBatchVirtAddr = + xgi_find_pcie_virt(info, + s_cmdring._lastBatchStartAddr); /* lastBatchVirtAddr should *never* be NULL. However, there * are currently some bugs that cause this to happen. 
The @@ -310,10 +309,9 @@ static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) static void addFlush2D(struct xgi_info * info) { - U32 *flushBatchVirtAddr; - U32 flushBatchHWAddr; - - U32 *lastBatchVirtAddr; + u32 *flushBatchVirtAddr; + u32 flushBatchHWAddr; + u32 *lastBatchVirtAddr; /* check buf is large enough to contain a new flush batch */ if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) { @@ -321,7 +319,7 @@ static void addFlush2D(struct xgi_info * info) } flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; - flushBatchVirtAddr = (U32 *) xgi_find_pcie_virt(info, flushBatchHWAddr); + flushBatchVirtAddr = xgi_find_pcie_virt(info, flushBatchHWAddr); /* not using memcpy for I assume the address is discrete */ *(flushBatchVirtAddr + 0) = 0x10000000; @@ -335,7 +333,7 @@ static void addFlush2D(struct xgi_info * info) // ASSERT(s_cmdring._lastBatchStartAddr != NULL); lastBatchVirtAddr = - (U32 *) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index c6221511..d2b95c0e 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -58,11 +58,11 @@ typedef enum { } CMD_SIZE; struct xgi_cmdring_info { - U32 _cmdRingSize; - U32 _cmdRingBuffer; - U32 _cmdRingBusAddr; - U32 _lastBatchStartAddr; - U32 _cmdRingOffset; + unsigned int _cmdRingSize; + u32 _cmdRingBuffer; + unsigned long _cmdRingBusAddr; + u32 _lastBatchStartAddr; + u32 _cmdRingOffset; }; extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index c4cc8900..b3425c75 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -25,7 +25,6 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index efbbd647..983ed0a9 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -114,7 +114,7 @@ struct xgi_info { struct xgi_sarea_info sarea_info; /* look up table parameters */ - U32 *lut_base; + u32 *lut_base; unsigned int lutPageSize; unsigned int lutPageOrder; bool isLUTInLFB; diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index ac73b41a..7d390d4b 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_fb.h" diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index d3385bef..2d310a2f 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 0f82e4ec..70459b2c 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ -#include "xgi_types.h" #include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" @@ -420,7 +419,7 @@ static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, struct page *page; unsigned long page_order = 0, count = 0, index = 0; unsigned long page_addr = 0; - unsigned long *lut_addr = NULL; + u32 *lut_addr = NULL; unsigned long lut_id = 0; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; int i, j, page_count = 0; diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h deleted file mode 100644 index f9a3360c..00000000 --- a/linux-core/xgi_types.h +++ /dev/null @@ -1,46 +0,0 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - ***************************************************************************/ - -#ifndef _XGI_TYPES_H_ -#define _XGI_TYPES_H_ - -/**************************************************************************** - * Typedefs * - ***************************************************************************/ - -/* - * mainly for 64-bit linux, where long is 64 bits - * and win9x, where int is 16 bit. - */ -#if defined(vxworks) -typedef unsigned int U32; /* 0 to 4294967295 */ -#else -typedef unsigned long U32; /* 0 to 4294967295 */ -#endif - -#endif -- cgit v1.2.3 From 04e4922c0c407a9f0cfe268f62130891e98fc682 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Wed, 11 Jul 2007 02:33:12 +0200 Subject: Made drm_sg_alloc accessible from inside the DRM - drm_sg_alloc_ioctl is the ioctl wrapper --- linux-core/drmP.h | 3 ++- linux-core/drm_drv.c | 2 +- linux-core/drm_scatter.c | 49 ++++++++++++++++++++++++++++++------------------ 3 files changed, 34 insertions(+), 20 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index c992c8d9..c274f1fa 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1129,8 +1129,9 @@ extern int drm_proc_cleanup(int minor, /* Scatter Gather Support (drm_scatter.h) */ extern void drm_sg_cleanup(drm_sg_mem_t * entry); -extern int drm_sg_alloc(struct inode *inode, struct file *filp, +extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +extern int drm_sg_alloc(drm_device_t *dev, drm_scatter_gather_t * request); extern int drm_sg_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 6bbe7fca..0d446a12 100644 
--- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -113,7 +113,7 @@ static drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, #endif - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index e5c9f877..c0d6db24 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -55,6 +55,7 @@ void drm_sg_cleanup(drm_sg_mem_t * entry) entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES); drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); } +EXPORT_SYMBOL(drm_sg_cleanup); #ifdef _LP64 # define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) @@ -62,13 +63,8 @@ void drm_sg_cleanup(drm_sg_mem_t * entry) # define ScatterHandle(x) (unsigned int)(x) #endif -int drm_sg_alloc(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_scatter_gather_t __user *argp = (void __user *)arg; - drm_scatter_gather_t request; drm_sg_mem_t *entry; unsigned long pages, i, j; @@ -80,17 +76,13 @@ int drm_sg_alloc(struct inode *inode, struct file *filp, if (dev->sg) return -EINVAL; - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); if (!entry) return -ENOMEM; memset(entry, 0, sizeof(*entry)); - - pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; - DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages); + pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; + DRM_DEBUG("sg size=%ld 
pages=%ld\n", request->size, pages); entry->pages = pages; entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), @@ -142,12 +134,7 @@ int drm_sg_alloc(struct inode *inode, struct file *filp, SetPageReserved(entry->pagelist[j]); } - request.handle = entry->handle; - - if (copy_to_user(argp, &request, sizeof(request))) { - drm_sg_cleanup(entry); - return -EFAULT; - } + request->handle = entry->handle; dev->sg = entry; @@ -196,6 +183,32 @@ int drm_sg_alloc(struct inode *inode, struct file *filp, failed: drm_sg_cleanup(entry); return -ENOMEM; + +} +EXPORT_SYMBOL(drm_sg_alloc); + +int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_scatter_gather_t __user *argp = (void __user *)arg; + drm_scatter_gather_t request; + int ret; + + if (copy_from_user(&request, argp, sizeof(request))) + return -EFAULT; + + ret = drm_sg_alloc(priv->head->dev, &request); + if ( ret ) return ret; + + if (copy_to_user(argp, &request, sizeof(request))) { + drm_sg_cleanup(priv->head->dev->sg); + return -EFAULT; + } + + + return 0; + } int drm_sg_free(struct inode *inode, struct file *filp, -- cgit v1.2.3 From 750371cb6ea9a64c9d4d4d3b9716c3c68d810d48 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 12 Jul 2007 10:15:16 +1000 Subject: nouveau: separate region_offset into map_handle and offset. 
--- linux-core/drmP.h | 3 +++ linux-core/drm_bufs.c | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index c274f1fa..2bbc6200 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1048,6 +1048,9 @@ extern unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource); extern unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource); +extern drm_map_list_t *drm_find_matching_map(drm_device_t *dev, + drm_local_map_t *map); + /* DMA support (drm_dma.h) */ extern int drm_dma_setup(drm_device_t * dev); diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index a2c8a75e..2f3e4b2a 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -48,8 +48,7 @@ unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource) } EXPORT_SYMBOL(drm_get_resource_len); -static drm_map_list_t *drm_find_matching_map(drm_device_t *dev, - drm_local_map_t *map) +drm_map_list_t *drm_find_matching_map(drm_device_t *dev, drm_local_map_t *map) { drm_map_list_t *entry; list_for_each_entry(entry, &dev->maplist, head) { @@ -62,6 +61,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev, return NULL; } +EXPORT_SYMBOL(drm_find_matching_map); static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, unsigned long user_token, int hashed_handle) -- cgit v1.2.3 From 4be9554fcdf27bce86d0d69068d284af2793b950 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 11:13:07 +1000 Subject: drm: fix typedef in drm_os_linux.h --- linux-core/drm_os_linux.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 2ea105c5..9d0d3f69 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -52,8 +52,8 @@ /** Read/write memory barrier */ #define DRM_MEMORYBARRIER() mb() /** DRM device local declaration */ -#define 
DRM_DEVICE drm_file_t *priv = filp->private_data; \ - drm_device_t *dev = priv->head->dev +#define DRM_DEVICE struct drm_file *priv = filp->private_data; \ + struct drm_device *dev = priv->head->dev /** IRQ handler arguments and return type and values */ #define DRM_IRQ_ARGS int irq, void *arg -- cgit v1.2.3 From b95ac8b7b313ad3eadc9e8bb0ead155303b7fa92 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 11:22:15 +1000 Subject: drm: detypedef drm.h and fixup all problems --- linux-core/drmP.h | 96 ++++++++++++++++++++++----------------------- linux-core/drm_agpsupport.c | 36 ++++++++--------- linux-core/drm_auth.c | 44 ++++++++++----------- linux-core/drm_bufs.c | 77 ++++++++++++++++++------------------ linux-core/drm_context.c | 46 +++++++++++----------- linux-core/drm_drawable.c | 26 ++++++------ linux-core/drm_drv.c | 4 +- linux-core/drm_ioctl.c | 28 ++++++------- linux-core/drm_irq.c | 12 +++--- linux-core/drm_lock.c | 8 ++-- linux-core/drm_proc.c | 4 +- linux-core/drm_scatter.c | 16 ++++---- linux-core/drm_vm.c | 14 +++---- linux-core/i810_dma.c | 10 ++--- linux-core/i810_drm.h | 2 +- linux-core/i810_drv.h | 4 +- 16 files changed, 212 insertions(+), 215 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 2bbc6200..cf2ed2ed 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -296,13 +296,9 @@ typedef struct drm_ioctl_desc { int flags; } drm_ioctl_desc_t; -typedef struct drm_devstate { - pid_t owner; /**< X server pid holding x_lock */ -} drm_devstate_t; - typedef struct drm_magic_entry { struct list_head head; - drm_hash_item_t hash_item; + struct drm_hash_item hash_item; struct drm_file *priv; } drm_magic_entry_t; @@ -346,10 +342,10 @@ typedef struct drm_buf { /** bufs is one longer than it has to be */ typedef struct drm_waitlist { int count; /**< Number of possible buffers */ - drm_buf_t **bufs; /**< List of pointers to buffers */ - drm_buf_t **rp; /**< Read pointer */ - drm_buf_t **wp; /**< 
Write pointer */ - drm_buf_t **end; /**< End pointer */ + struct drm_buf **bufs; /**< List of pointers to buffers */ + struct drm_buf **rp; /**< Read pointer */ + struct drm_buf **wp; /**< Write pointer */ + struct drm_buf **end; /**< End pointer */ spinlock_t read_lock; spinlock_t write_lock; } drm_waitlist_t; @@ -357,7 +353,7 @@ typedef struct drm_waitlist { typedef struct drm_freelist { int initialized; /**< Freelist in use */ atomic_t count; /**< Number of free buffers */ - drm_buf_t *next; /**< End pointer */ + struct drm_buf *next; /**< End pointer */ wait_queue_head_t waiting; /**< Processes waiting on free bufs */ int low_mark; /**< Low water mark */ @@ -378,11 +374,11 @@ typedef struct drm_dma_handle { typedef struct drm_buf_entry { int buf_size; /**< size */ int buf_count; /**< number of buffers */ - drm_buf_t *buflist; /**< buffer list */ + struct drm_buf *buflist; /**< buffer list */ int seg_count; int page_order; - drm_dma_handle_t **seglist; - drm_freelist_t freelist; + struct drm_dma_handle **seglist; + struct drm_freelist freelist; } drm_buf_entry_t; /* @@ -440,8 +436,8 @@ typedef struct drm_queue { atomic_t total_flushed; /**< Total flushes statistic */ atomic_t total_locks; /**< Total locks statistics */ #endif - drm_ctx_flags_t flags; /**< Context preserving and 2D-only */ - drm_waitlist_t waitlist; /**< Pending buffers */ + enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ + struct drm_waitlist waitlist; /**< Pending buffers */ wait_queue_head_t flush_queue; /**< Processes waiting until flush */ } drm_queue_t; @@ -449,7 +445,7 @@ typedef struct drm_queue { * Lock data. 
*/ typedef struct drm_lock_data { - drm_hw_lock_t *hw_lock; /**< Hardware lock */ + struct drm_hw_lock *hw_lock; /**< Hardware lock */ struct file *filp; /**< File descr of lock holder (0=kernel) */ wait_queue_head_t lock_queue; /**< Queue of blocked processes */ unsigned long lock_time; /**< Time of last lock in jiffies */ @@ -464,9 +460,9 @@ typedef struct drm_lock_data { */ typedef struct drm_device_dma { - drm_buf_entry_t bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ + struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ int buf_count; /**< total number of buffers */ - drm_buf_t **buflist; /**< Vector of pointers into drm_device_dma::bufs */ + struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ int seg_count; int page_count; /**< number of pages */ unsigned long *pagelist; /**< page list */ @@ -524,7 +520,7 @@ typedef struct drm_sg_mem { typedef struct drm_sigdata { int context; - drm_hw_lock_t *lock; + struct drm_hw_lock *lock; } drm_sigdata_t; @@ -553,13 +549,13 @@ typedef struct drm_mm { */ typedef struct drm_map_list { struct list_head head; /**< list head */ - drm_hash_item_t hash; - drm_map_t *map; /**< mapping */ + struct drm_hash_item hash; + struct drm_map *map; /**< mapping */ drm_u64_t user_token; drm_mm_node_t *file_offset_node; } drm_map_list_t; -typedef drm_map_t drm_local_map_t; +typedef struct drm_map drm_local_map_t; /** * Context handle list @@ -567,7 +563,7 @@ typedef drm_map_t drm_local_map_t; typedef struct drm_ctx_list { struct list_head head; /**< list head */ drm_context_t handle; /**< context handle */ - drm_file_t *tag; /**< associated fd private data */ + struct drm_file *tag; /**< associated fd private data */ } drm_ctx_list_t; typedef struct drm_vbl_sig { @@ -646,9 +642,9 @@ struct drm_driver { struct file * filp); void (*reclaim_buffers_idlelocked) (struct drm_device *dev, struct file * filp); - unsigned long (*get_map_ofs) (drm_map_t * map); + 
unsigned long (*get_map_ofs) (struct drm_map * map); unsigned long (*get_reg_ofs) (struct drm_device * dev); - void (*set_version) (struct drm_device * dev, drm_set_version_t * sv); + void (*set_version) (struct drm_device * dev, struct drm_set_version * sv); struct drm_fence_driver *fence_driver; struct drm_bo_driver *bo_driver; @@ -713,14 +709,14 @@ typedef struct drm_device { /** \name Performance counters */ /*@{ */ unsigned long counters; - drm_stat_type_t types[15]; + enum drm_stat_type types[15]; atomic_t counts[15]; /*@} */ /** \name Authentication */ /*@{ */ struct list_head filelist; - drm_open_hash_t magiclist; + struct drm_open_hash magiclist; struct list_head magicfree; /*@} */ @@ -728,7 +724,7 @@ typedef struct drm_device { /*@{ */ struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ - drm_open_hash_t map_hash; /**< User token hash table for maps */ + struct drm_open_hash map_hash; /**< User token hash table for maps */ drm_mm_t offset_manager; /**< User token manager */ drm_open_hash_t object_hash; /**< User token hash table for objects */ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ @@ -743,7 +739,7 @@ typedef struct drm_device { struct idr ctx_idr; struct list_head vmalist; /**< List of vmas (for debugging) */ - drm_lock_data_t lock; /**< Information on hardware lock */ + struct drm_lock_data lock; /**< Information on hardware lock */ /*@} */ /** \name DMA queues (contexts) */ @@ -751,8 +747,8 @@ typedef struct drm_device { int queue_count; /**< Number of active DMA queues */ int queue_reserved; /**< Number of reserved DMA queues */ int queue_slots; /**< Actual length of queuelist */ - drm_queue_t **queuelist; /**< Vector of pointers to DMA queues */ - drm_device_dma_t *dma; /**< Optional pointer for DMA support */ + struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */ + struct drm_device_dma *dma; /**< Optional pointer for DMA support */ /*@} */ /** 
\name Context support */ @@ -792,7 +788,7 @@ typedef struct drm_device { wait_queue_head_t buf_readers; /**< Processes waiting to read */ wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */ - drm_agp_head_t *agp; /**< AGP data */ + struct drm_agp_head *agp; /**< AGP data */ struct pci_dev *pdev; /**< PCI device structure */ int pci_vendor; /**< PCI vendor id */ @@ -800,15 +796,15 @@ typedef struct drm_device { #ifdef __alpha__ struct pci_controller *hose; #endif - drm_sg_mem_t *sg; /**< Scatter gather memory */ + struct drm_sg_mem *sg; /**< Scatter gather memory */ void *dev_private; /**< device private data */ - drm_sigdata_t sigdata; /**< For block_all_signals */ + struct drm_sigdata sigdata; /**< For block_all_signals */ sigset_t sigmask; struct drm_driver *driver; drm_local_map_t *agp_buffer_map; unsigned int agp_buffer_token; - drm_head_t primary; /**< primary screen head */ + struct drm_head primary; /**< primary screen head */ drm_fence_manager_t fm; drm_buffer_manager_t bm; @@ -915,7 +911,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); /* Mapping support (drm_vm.h) */ extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); -extern unsigned long drm_core_get_map_ofs(drm_map_t * map); +extern unsigned long drm_core_get_map_ofs(struct drm_map * map); extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma); @@ -992,8 +988,8 @@ extern int drm_rmdraw(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_update_drawable_info(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, - drm_drawable_t id); +extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, + drm_drawable_t id); extern void drm_drawable_free_all(drm_device_t *dev); /* Authentication IOCTL support 
(drm_auth.h) */ @@ -1021,12 +1017,12 @@ extern int drm_i_have_hw_lock(struct file *filp); extern int drm_kernel_take_hw_lock(struct file *filp); /* Buffer management support (drm_bufs.h) */ -extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addbufs_fb (drm_device_t * dev, drm_buf_desc_t * request); +extern int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request); +extern int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request); +extern int drm_addbufs_fb (drm_device_t * dev, struct drm_buf_desc * request); extern int drm_addmap(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_local_map_t ** map_ptr); + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, drm_local_map_t ** map_ptr); extern int drm_addmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map); @@ -1081,22 +1077,22 @@ extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, extern int drm_agp_release(drm_device_t *dev); extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode); +extern int drm_agp_enable(drm_device_t *dev, struct drm_agp_mode mode); extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info); +extern int drm_agp_info(drm_device_t * dev, struct drm_agp_info *info); extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request); +extern int drm_agp_alloc(drm_device_t *dev, struct drm_agp_buffer *request); extern 
int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request); +extern int drm_agp_free(drm_device_t *dev, struct drm_agp_buffer *request); extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request); +extern int drm_agp_unbind(drm_device_t *dev, struct drm_agp_binding *request); extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request); +extern int drm_agp_bind(drm_device_t *dev, struct drm_agp_binding *request); extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) @@ -1134,7 +1130,7 @@ extern int drm_proc_cleanup(int minor, extern void drm_sg_cleanup(drm_sg_mem_t * entry); extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_sg_alloc(drm_device_t *dev, drm_scatter_gather_t * request); +extern int drm_sg_alloc(drm_device_t *dev, struct drm_scatter_gather * request); extern int drm_sg_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index f134563a..c037defe 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -48,7 +48,7 @@ * Verifies the AGP device has been initialized and acquired and fills in the * drm_agp_info structure with the information in drm_agp_head::agp_info. 
*/ -int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info) +int drm_agp_info(drm_device_t * dev, struct drm_agp_info *info) { DRM_AGP_KERN *kern; @@ -75,14 +75,14 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_info_t info; + struct drm_agp_info info; int err; err = drm_agp_info(dev, &info); if (err) return err; - if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info))) + if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info))) return -EFAULT; return 0; } @@ -181,7 +181,7 @@ int drm_agp_release_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device has been acquired but not enabled, and calls * \c agp_enable. */ -int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode) +int drm_agp_enable(drm_device_t *dev, struct drm_agp_mode mode) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -203,10 +203,10 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_mode_t mode; + struct drm_agp_mode mode; - if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode))) + if (copy_from_user(&mode, (struct drm_agp_mode __user *) arg, sizeof(mode))) return -EFAULT; return drm_agp_enable(dev, mode); @@ -224,7 +224,7 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device is present and has been acquired, allocates the * memory via alloc_agp() and creates a drm_agp_mem entry for it. 
*/ -int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request) +int drm_agp_alloc(drm_device_t *dev, struct drm_agp_buffer *request) { drm_agp_mem_t *entry; DRM_AGP_MEM *memory; @@ -264,8 +264,8 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_buffer_t request; - drm_agp_buffer_t __user *argp = (void __user *)arg; + struct drm_agp_buffer request; + struct drm_agp_buffer __user *argp = (void __user *)arg; int err; if (copy_from_user(&request, argp, sizeof(request))) @@ -323,7 +323,7 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, * Verifies the AGP device is present and acquired, looks-up the AGP memory * entry and passes it to the unbind_agp() function. */ -int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request) +int drm_agp_unbind(drm_device_t *dev, struct drm_agp_binding *request) { drm_agp_mem_t *entry; int ret; @@ -347,10 +347,10 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_binding_t request; + struct drm_agp_binding request; if (copy_from_user - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) + (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) return -EFAULT; return drm_agp_unbind(dev, &request); @@ -370,7 +370,7 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, * is currently bound into the GATT. Looks-up the AGP memory entry and passes * it to bind_agp() function. 
*/ -int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request) +int drm_agp_bind(drm_device_t *dev, struct drm_agp_binding *request) { drm_agp_mem_t *entry; int retcode; @@ -398,10 +398,10 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_binding_t request; + struct drm_agp_binding request; if (copy_from_user - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) + (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) return -EFAULT; return drm_agp_bind(dev, &request); @@ -422,7 +422,7 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, * unbind_agp(). Frees it via free_agp() as well as the entry itself * and unlinks from the doubly linked list it's inserted in. */ -int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request) +int drm_agp_free(drm_device_t *dev, struct drm_agp_buffer *request) { drm_agp_mem_t *entry; @@ -448,10 +448,10 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_agp_buffer_t request; + struct drm_agp_buffer request; if (copy_from_user - (&request, (drm_agp_buffer_t __user *) arg, sizeof(request))) + (&request, (struct drm_agp_buffer __user *) arg, sizeof(request))) return -EFAULT; return drm_agp_free(dev, &request); diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c index 6948d858..4c48d872 100644 --- a/linux-core/drm_auth.c +++ b/linux-core/drm_auth.c @@ -45,15 +45,15 @@ * the one with matching magic number, while holding the drm_device::struct_mutex * lock. 
*/ -static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) +static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) { - drm_file_t *retval = NULL; - drm_magic_entry_t *pt; - drm_hash_item_t *hash; + struct drm_file *retval = NULL; + struct drm_magic_entry *pt; + struct drm_hash_item *hash; - mutex_lock(&dev->struct_mutex); - if (!drm_ht_find_item(&dev->magiclist, (unsigned long) magic, &hash)) { - pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item); + mutex_lock(&dev->struct_mutex); + if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); retval = pt->priv; } mutex_unlock(&dev->struct_mutex); @@ -71,10 +71,10 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) * associated the magic number hash key in drm_device::magiclist, while holding * the drm_device::struct_mutex lock. */ -static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, +static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, drm_magic_t magic) { - drm_magic_entry_t *entry; + struct drm_magic_entry *entry; DRM_DEBUG("%d\n", magic); @@ -101,10 +101,10 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, * Searches and unlinks the entry in drm_device::magiclist with the magic * number hash key, while holding the drm_device::struct_mutex lock. 
*/ -static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) +static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) { - drm_magic_entry_t *pt; - drm_hash_item_t *hash; + struct drm_magic_entry *pt; + struct drm_hash_item *hash; DRM_DEBUG("%d\n", magic); @@ -113,7 +113,7 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) mutex_unlock(&dev->struct_mutex); return -EINVAL; } - pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item); + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); drm_ht_remove_item(&dev->magiclist, hash); list_del(&pt->head); mutex_unlock(&dev->struct_mutex); @@ -141,9 +141,9 @@ int drm_getmagic(struct inode *inode, struct file *filp, { static drm_magic_t sequence = 0; static DEFINE_SPINLOCK(lock); - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_auth_t auth; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_auth auth; /* Find unique magic */ if (priv->magic) { @@ -161,7 +161,7 @@ int drm_getmagic(struct inode *inode, struct file *filp, } DRM_DEBUG("%u\n", auth.magic); - if (copy_to_user((drm_auth_t __user *) arg, &auth, sizeof(auth))) + if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth))) return -EFAULT; return 0; } @@ -180,12 +180,12 @@ int drm_getmagic(struct inode *inode, struct file *filp, int drm_authmagic(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_auth_t auth; - drm_file_t *file; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_auth auth; + struct drm_file *file; - if (copy_from_user(&auth, (drm_auth_t __user *) arg, sizeof(auth))) + if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth))) return -EFAULT; DRM_DEBUG("%u\n", auth.magic); if ((file = drm_find_file(dev, auth.magic))) { diff 
--git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index 2f3e4b2a..3f34de0e 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -102,11 +102,12 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, * applicable and if supported by the kernel. */ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_map_list_t ** maplist) + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, + struct drm_map_list **maplist) { - drm_map_t *map; - drm_map_list_t *list; + struct drm_map *map; + struct drm_map_list *list; drm_dma_handle_t *dmah; unsigned long user_token; int ret; @@ -311,10 +312,10 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, } int drm_addmap(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_local_map_t ** map_ptr) + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, drm_local_map_t ** map_ptr) { - drm_map_list_t *list; + struct drm_map_list *list; int rc; rc = drm_addmap_core(dev, offset, size, type, flags, &list); @@ -330,9 +331,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t map; + struct drm_map map; drm_map_list_t *maplist; - drm_map_t __user *argp = (void __user *)arg; + struct drm_map __user *argp = (void __user *)arg; int err; if (!(filp->f_mode & 3)) @@ -351,7 +352,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, if (err) return err; - if (copy_to_user(argp, maplist->map, sizeof(drm_map_t))) + if (copy_to_user(argp, maplist->map, sizeof(struct drm_map))) return -EFAULT; /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ @@ -367,7 +368,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, * \param inode device inode. * \param filp file pointer. 
* \param cmd command. - * \param arg pointer to a drm_map_t structure. + * \param arg pointer to a struct drm_map structure. * \return zero on success or a negative value on error. * * Searches the map on drm_device::maplist, removes it from the list, see if @@ -459,12 +460,12 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t request; + struct drm_map request; drm_local_map_t *map = NULL; drm_map_list_t *r_list; int ret; - if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) { + if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) { return -EFAULT; } @@ -512,7 +513,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp, * * Frees any pages and buffers associated with the given entry. */ -static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) +static void drm_cleanup_buf_error(drm_device_t * dev, struct drm_buf_entry * entry) { int i; @@ -550,17 +551,17 @@ static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) * Add AGP buffers for DMA transfers * * \param dev drm_device_t to which the buffers are to be added. - * \param request pointer to a drm_buf_desc_t describing the request. + * \param request pointer to a struct drm_buf_desc describing the request. * \return zero on success or a negative number on failure. * * After some sanity checks creates a drm_buf structure for each buffer and * reallocates the buffer list of the same size order to accommodate the new * buffers. 
*/ -int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request) { drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_agp_mem_t *agp_entry; drm_buf_t *buf; unsigned long offset; @@ -727,7 +728,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) EXPORT_SYMBOL(drm_addbufs_agp); #endif /* __OS_HAS_AGP */ -int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request) { drm_device_dma_t *dma = dev->dma; int count; @@ -735,7 +736,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) int size; int total; int page_order; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_dma_handle_t *dmah; drm_buf_t *buf; int alignment; @@ -953,10 +954,10 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) } EXPORT_SYMBOL(drm_addbufs_pci); -static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) +static int drm_addbufs_sg(drm_device_t * dev, struct drm_buf_desc * request) { drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; @@ -1115,10 +1116,10 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) return 0; } -int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_fb(drm_device_t * dev, struct drm_buf_desc * request) { drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_buf_t *buf; unsigned long offset; unsigned long agp_offset; @@ -1283,7 +1284,7 @@ EXPORT_SYMBOL(drm_addbufs_fb); * \param inode device inode. * \param filp file pointer. * \param cmd command. - * \param arg pointer to a drm_buf_desc_t request. + * \param arg pointer to a struct drm_buf_desc request. * \return zero on success or a negative number on failure. 
* * According with the memory type specified in drm_buf_desc::flags and the @@ -1294,7 +1295,7 @@ EXPORT_SYMBOL(drm_addbufs_fb); int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_buf_desc_t request; + struct drm_buf_desc request; drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; int ret; @@ -1302,7 +1303,7 @@ int drm_addbufs(struct inode *inode, struct file *filp, if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if (copy_from_user(&request, (drm_buf_desc_t __user *) arg, + if (copy_from_user(&request, (struct drm_buf_desc __user *) arg, sizeof(request))) return -EFAULT; @@ -1350,8 +1351,8 @@ int drm_infobufs(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_info_t request; - drm_buf_info_t __user *argp = (void __user *)arg; + struct drm_buf_info request; + struct drm_buf_info __user *argp = (void __user *)arg; int i; int count; @@ -1382,9 +1383,9 @@ int drm_infobufs(struct inode *inode, struct file *filp, if (request.count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { - drm_buf_desc_t __user *to = + struct drm_buf_desc __user *to = &request.list[count]; - drm_buf_entry_t *from = &dma->bufs[i]; + struct drm_buf_entry *from = &dma->bufs[i]; drm_freelist_t *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, &from->buf_count, @@ -1438,9 +1439,9 @@ int drm_markbufs(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; + struct drm_buf_desc request; int order; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1449,7 +1450,7 @@ int drm_markbufs(struct inode *inode, struct file *filp, return -EINVAL; if 
(copy_from_user(&request, - (drm_buf_desc_t __user *) arg, sizeof(request))) + (struct drm_buf_desc __user *) arg, sizeof(request))) return -EFAULT; DRM_DEBUG("%d, %d, %d\n", @@ -1488,7 +1489,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_free_t request; + struct drm_buf_free request; int i; int idx; drm_buf_t *buf; @@ -1500,7 +1501,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, return -EINVAL; if (copy_from_user(&request, - (drm_buf_free_t __user *) arg, sizeof(request))) + (struct drm_buf_free __user *) arg, sizeof(request))) return -EFAULT; DRM_DEBUG("%d\n", request.count); @@ -1544,12 +1545,12 @@ int drm_mapbufs(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; drm_device_dma_t *dma = dev->dma; - drm_buf_map_t __user *argp = (void __user *)arg; + struct drm_buf_map __user *argp = (void __user *)arg; int retcode = 0; const int zero = 0; unsigned long virtual; unsigned long address; - drm_buf_map_t request; + struct drm_buf_map request; int i; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1575,7 +1576,7 @@ int drm_mapbufs(struct inode *inode, struct file *filp, && (dma->flags & _DRM_DMA_USE_SG)) || (drm_core_check_feature(dev, DRIVER_FB_DMA) && (dma->flags & _DRM_DMA_USE_FB))) { - drm_map_t *map = dev->agp_buffer_map; + struct drm_map *map = dev->agp_buffer_map; unsigned long token = dev->agp_buffer_token; if (!map) { diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 195c7fb5..6f066ac4 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -145,10 +145,10 @@ int drm_getsareactx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_priv_map_t __user *argp = (void __user *)arg; - drm_ctx_priv_map_t request; - drm_map_t *map; - 
drm_map_list_t *_entry; + struct drm_ctx_priv_map __user *argp = (void __user *)arg; + struct drm_ctx_priv_map request; + struct drm_map *map; + struct drm_map_list *_entry; if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; @@ -196,12 +196,12 @@ int drm_setsareactx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_priv_map_t request; - drm_map_t *map = NULL; - drm_map_list_t *r_list = NULL; + struct drm_ctx_priv_map request; + struct drm_map *map = NULL; + struct drm_map_list *r_list = NULL; if (copy_from_user(&request, - (drm_ctx_priv_map_t __user *) arg, sizeof(request))) + (struct drm_ctx_priv_map __user *) arg, sizeof(request))) return -EFAULT; mutex_lock(&dev->struct_mutex); @@ -301,9 +301,9 @@ static int drm_context_switch_complete(drm_device_t * dev, int new) int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_ctx_res_t res; - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_ctx_res res; + struct drm_ctx __user *argp = (void __user *)arg; + struct drm_ctx ctx; int i; if (copy_from_user(&res, argp, sizeof(res))) @@ -339,10 +339,10 @@ int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_list_t *ctx_entry; - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_device *dev = priv->head->dev; + struct drm_ctx_list *ctx_entry; + struct drm_ctx __user *argp = (void __user *)arg; + struct drm_ctx ctx; if (copy_from_user(&ctx, argp, sizeof(ctx))) return -EFAULT; @@ -406,8 +406,8 @@ int drm_modctx(struct inode *inode, struct file *filp, int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_ctx __user *argp = (void __user *)arg; + struct 
drm_ctx ctx; if (copy_from_user(&ctx, argp, sizeof(ctx))) return -EFAULT; @@ -436,9 +436,9 @@ int drm_switchctx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_ctx ctx; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); @@ -461,9 +461,9 @@ int drm_newctx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_ctx ctx; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); @@ -488,9 +488,9 @@ int drm_rmctx(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_ctx ctx; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) + if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) return -EFAULT; DRM_DEBUG("%d\n", ctx.handle); diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 7129980b..5a2a14f9 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -44,7 +44,7 @@ int drm_adddraw(DRM_IOCTL_ARGS) { DRM_DEVICE; unsigned long irqflags; - drm_draw_t draw; + struct drm_draw draw; int new_id = 0; int ret; @@ -67,7 +67,7 @@ again: DRM_DEBUG("%d\n", draw.handle); - DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw)); + DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw)); return 0; } @@ -78,10 +78,10 @@ again: int drm_rmdraw(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_draw_t draw; + struct drm_draw draw; unsigned long irqflags; - DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw 
__user *) data, sizeof(draw)); spin_lock_irqsave(&dev->drw_lock, irqflags); @@ -99,13 +99,13 @@ int drm_rmdraw(DRM_IOCTL_ARGS) int drm_update_drawable_info(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_update_draw_t update; + struct drm_update_draw update; unsigned long irqflags; - drm_clip_rect_t *rects; + struct drm_clip_rect *rects; struct drm_drawable_info *info; int err; - DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, + DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data, sizeof(update)); info = idr_find(&dev->drw_idr, update.handle); @@ -123,7 +123,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) switch (update.type) { case DRM_DRAWABLE_CLIPRECTS: if (update.num != info->num_rects) { - rects = drm_alloc(update.num * sizeof(drm_clip_rect_t), + rects = drm_alloc(update.num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } else rects = info->rects; @@ -135,7 +135,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) } if (update.num && DRM_COPY_FROM_USER(rects, - (drm_clip_rect_t __user *) + (struct drm_clip_rect __user *) (unsigned long)update.data, update.num * sizeof(*rects))) { @@ -148,7 +148,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) if (rects != info->rects) { drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } info->rects = rects; @@ -168,7 +168,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) error: if (rects != info->rects) - drm_free(rects, update.num * sizeof(drm_clip_rect_t), + drm_free(rects, update.num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); return err; @@ -177,7 +177,7 @@ error: /** * Caller must hold the drawable spinlock! 
*/ -drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) +struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { return idr_find(&dev->drw_idr, id); } @@ -189,7 +189,7 @@ static int drm_drawable_free(int idr, void *p, void *data) if (info) { drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + sizeof(struct drm_clip_rect), DRM_MEM_BUFS); drm_free(info, sizeof(*info), DRM_MEM_BUFS); } diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 30200ca4..fd817f88 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -550,8 +550,8 @@ static int drm_version(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_version_t __user *argp = (void __user *)arg; - drm_version_t version; + struct drm_version __user *argp = (void __user *)arg; + struct drm_version version; int len; if (copy_from_user(&version, argp, sizeof(version))) diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 97df972f..02f70243 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -54,8 +54,8 @@ int drm_getunique(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_unique_t __user *argp = (void __user *)arg; - drm_unique_t u; + struct drm_unique __user *argp = (void __user *)arg; + struct drm_unique u; if (copy_from_user(&u, argp, sizeof(u))) return -EFAULT; @@ -88,13 +88,13 @@ int drm_setunique(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_unique_t u; + struct drm_unique u; int domain, bus, slot, func, ret; if (dev->unique_len || dev->unique) return -EBUSY; - if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u))) + if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u))) return -EFAULT; if (!u.unique_len || u.unique_len > 1024) @@ 
-181,9 +181,9 @@ int drm_getmap(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t __user *argp = (void __user *)arg; - drm_map_t map; - drm_map_list_t *r_list = NULL; + struct drm_map __user *argp = (void __user *)arg; + struct drm_map map; + struct drm_map_list *r_list = NULL; struct list_head *list; int idx; int i; @@ -242,8 +242,8 @@ int drm_getclient(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_client_t __user *argp = (drm_client_t __user *)arg; - drm_client_t client; + struct drm_client __user *argp = (struct drm_client __user *)arg; + struct drm_client client; drm_file_t *pt; int idx; int i; @@ -291,7 +291,7 @@ int drm_getstats(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_stats_t stats; + struct drm_stats stats; int i; memset(&stats, 0, sizeof(stats)); @@ -311,7 +311,7 @@ int drm_getstats(struct inode *inode, struct file *filp, mutex_unlock(&dev->struct_mutex); - if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats))) + if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats))) return -EFAULT; return 0; } @@ -330,10 +330,10 @@ int drm_getstats(struct inode *inode, struct file *filp, int drm_setversion(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_set_version_t sv; - drm_set_version_t retv; + struct drm_set_version sv; + struct drm_set_version retv; int if_version; - drm_set_version_t __user *argp = (void __user *)data; + struct drm_set_version __user *argp = (void __user *)data; if (copy_from_user(&sv, argp, sizeof(sv))) return -EFAULT; diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 88716712..2e2c4d9c 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -55,8 +55,8 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t 
*dev = priv->head->dev; - drm_irq_busid_t __user *argp = (void __user *)arg; - drm_irq_busid_t p; + struct drm_irq_busid __user *argp = (void __user *)arg; + struct drm_irq_busid p; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; @@ -197,11 +197,11 @@ int drm_control(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_control_t ctl; + struct drm_control ctl; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ - if (copy_from_user(&ctl, (drm_control_t __user *) arg, sizeof(ctl))) + if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl))) return -EFAULT; switch (ctl.func) { @@ -244,8 +244,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_wait_vblank_t __user *argp = (void __user *)data; - drm_wait_vblank_t vblwait; + union drm_wait_vblank __user *argp = (void __user *)data; + union drm_wait_vblank vblwait; struct timeval now; int ret = 0; unsigned int flags, seq; diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index f02df36b..6d348251 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -54,12 +54,12 @@ int drm_lock(struct inode *inode, struct file *filp, drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; DECLARE_WAITQUEUE(entry, current); - drm_lock_t lock; + struct drm_lock lock; int ret = 0; ++priv->lock_count; - if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock))) + if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { @@ -154,10 +154,10 @@ int drm_unlock(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_lock_t lock; + struct drm_lock lock; unsigned long irqflags; - if (copy_from_user(&lock, (drm_lock_t __user *) 
arg, sizeof(lock))) + if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) return -EFAULT; if (lock.context == DRM_KERNEL_CONTEXT) { diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index e93a0406..e59f2afa 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -209,8 +209,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request, { drm_device_t *dev = (drm_device_t *) data; int len = 0; - drm_map_t *map; - drm_map_list_t *r_list; + struct drm_map *map; + struct drm_map_list *r_list; /* Hardcoded from _DRM_FRAME_BUFFER, _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index c0d6db24..138ae087 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -36,7 +36,7 @@ #define DEBUG_SCATTER 0 -void drm_sg_cleanup(drm_sg_mem_t * entry) +void drm_sg_cleanup(struct drm_sg_mem *entry) { struct page *page; int i; @@ -63,9 +63,9 @@ EXPORT_SYMBOL(drm_sg_cleanup); # define ScatterHandle(x) (unsigned int)(x) #endif -int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request) +int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) { - drm_sg_mem_t *entry; + struct drm_sg_mem *entry; unsigned long pages, i, j; DRM_DEBUG("%s\n", __FUNCTION__); @@ -191,8 +191,8 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_scatter_gather_t __user *argp = (void __user *)arg; - drm_scatter_gather_t request; + struct drm_scatter_gather __user *argp = (void __user *)arg; + struct drm_scatter_gather request; int ret; if (copy_from_user(&request, argp, sizeof(request))) @@ -216,14 +216,14 @@ int drm_sg_free(struct inode *inode, struct file *filp, { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_scatter_gather_t request; - drm_sg_mem_t *entry; + struct drm_scatter_gather request; + struct drm_sg_mem *entry; if 
(!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; if (copy_from_user(&request, - (drm_scatter_gather_t __user *) arg, + (struct drm_scatter_gather __user *) arg, sizeof(request))) return -EFAULT; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 72d63c10..7451adc5 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -87,7 +87,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, { drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t *map = NULL; + struct drm_map *map = NULL; drm_map_list_t *r_list; drm_hash_item_t *hash; @@ -172,7 +172,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_map_t *map = (drm_map_t *) vma->vm_private_data; + struct drm_map *map = (struct drm_map *) vma->vm_private_data; unsigned long offset; unsigned long i; struct page *page; @@ -206,7 +206,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; drm_vma_entry_t *pt, *temp; - drm_map_t *map; + struct drm_map *map; drm_map_list_t *r_list; int found_maps = 0; @@ -321,7 +321,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_map_t *map = (drm_map_t *) vma->vm_private_data; + struct drm_map *map = (struct drm_map *) vma->vm_private_data; drm_file_t *priv = vma->vm_file->private_data; drm_device_t *dev = priv->head->dev; drm_sg_mem_t *entry = dev->sg; @@ -524,7 +524,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) return 0; } -unsigned long drm_core_get_map_ofs(drm_map_t * map) +unsigned long drm_core_get_map_ofs(struct drm_map * map) { return map->offset; } @@ -557,9 +557,9 @@ static int 
drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->head->dev; - drm_map_t *map = NULL; + struct drm_map *map = NULL; unsigned long offset = 0; - drm_hash_item_t *hash; + struct drm_hash_item *hash; DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", vma->vm_start, vma->vm_end, vma->vm_pgoff); diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 49379434..a4e0c390 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -346,7 +346,7 @@ static int i810_dma_initialize(drm_device_t * dev, drm_i810_private_t * dev_priv, drm_i810_init_t * init) { - drm_map_list_t *r_list; + struct drm_map_list *r_list; memset(dev_priv, 0, sizeof(drm_i810_private_t)); list_for_each_entry(r_list, &dev->maplist, head) { @@ -692,7 +692,7 @@ static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = 2; int i; @@ -765,7 +765,7 @@ static void i810_dma_dispatch_swap(drm_device_t * dev) drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = 2; int i; @@ -812,7 +812,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev, drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_clip_rect_t *box = sarea_priv->boxes; + struct drm_clip_rect *box = sarea_priv->boxes; int nbox = sarea_priv->nbox; unsigned long address = (unsigned long)buf->bus_address; unsigned long start = address - dev->agp->base; @@ -1140,7 
+1140,7 @@ static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", current->pid, retcode, d.granted); - if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d))) + if (copy_to_user((void __user *) arg, &d, sizeof(d))) return -EFAULT; sarea_priv->last_dispatch = (int)hw_status[5]; diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h index beec4a2a..db59550d 100644 --- a/linux-core/i810_drm.h +++ b/linux-core/i810_drm.h @@ -163,7 +163,7 @@ typedef struct _drm_i810_sarea { unsigned int dirty; unsigned int nbox; - drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS]; /* Maintain an LRU of contiguous regions of texture space. If * you think you own a region of texture memory, and it has an diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index 69d79499..dbe9d708 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -77,8 +77,8 @@ typedef struct _drm_i810_ring_buffer { } drm_i810_ring_buffer_t; typedef struct drm_i810_private { - drm_map_t *sarea_map; - drm_map_t *mmio_map; + struct drm_map *sarea_map; + struct drm_map *mmio_map; drm_i810_sarea_t *sarea_priv; drm_i810_ring_buffer_t ring; -- cgit v1.2.3 From 1a07256d601a94466b7905680f5b929bf3f2390a Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 11:30:53 +1000 Subject: drm: remove ttm userspace typedefs --- linux-core/drm_bo.c | 2 +- linux-core/drm_fence.c | 18 +++++++++--------- linux-core/drm_objects.h | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 2ce3dbca..4a80cf39 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1589,7 +1589,7 @@ static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, int drm_buffer_object_create(drm_device_t *dev, unsigned long size, - drm_bo_type_t type, + enum drm_bo_type type, uint64_t mask, uint32_t 
hint, uint32_t page_alignment, diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index b6f14249..ccd9b19c 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -570,7 +570,7 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -618,7 +618,7 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_user_object_t *uo; ret = 0; @@ -646,7 +646,7 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; drm_user_object_t *uo; unsigned long flags; @@ -680,7 +680,7 @@ int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; ret = 0; if (!fm->initialized) { @@ -697,7 +697,7 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -729,7 +729,7 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -763,7 +763,7 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -799,7 +799,7 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; @@ -834,7 +834,7 @@ 
int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) DRM_DEVICE; int ret; drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; + struct drm_fence_arg arg; drm_fence_object_t *fence; unsigned long flags; ret = 0; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 8b65f90a..4bd9047c 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -337,7 +337,7 @@ typedef struct drm_buffer_object { atomic_t usage; unsigned long buffer_start; - drm_bo_type_t type; + enum drm_bo_type type; unsigned long offset; atomic_t mapped; drm_bo_mem_reg_t mem; -- cgit v1.2.3 From 21ee6fbfb8f2219a454458204afc9c5fcd89f9a8 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 12:32:51 +1000 Subject: drm: remove drmP.h internal typedefs --- linux-core/ati_pcigart.c | 8 +- linux-core/drmP.h | 238 ++++++++++++++++++++++---------------------- linux-core/drm_agpsupport.c | 92 ++++++++--------- linux-core/drm_bo.c | 96 +++++++++--------- linux-core/drm_bo_move.c | 10 +- linux-core/drm_bufs.c | 88 ++++++++-------- linux-core/drm_context.c | 36 +++---- linux-core/drm_dma.c | 12 +-- linux-core/drm_drawable.c | 4 +- linux-core/drm_drv.c | 34 +++---- linux-core/drm_fence.c | 24 ++--- linux-core/drm_fops.c | 26 ++--- linux-core/drm_ioctl.c | 26 ++--- linux-core/drm_irq.c | 28 +++--- linux-core/drm_lock.c | 20 ++-- linux-core/drm_memory.c | 8 +- linux-core/drm_mm.c | 66 ++++++------ linux-core/drm_object.c | 32 +++--- linux-core/drm_objects.h | 38 +++---- linux-core/drm_pci.c | 6 +- linux-core/drm_proc.c | 36 +++---- linux-core/drm_scatter.c | 6 +- linux-core/drm_sman.c | 12 +-- linux-core/drm_stub.c | 20 ++-- linux-core/drm_sysfs.c | 4 +- linux-core/drm_vm.c | 68 ++++++------- linux-core/i810_dma.c | 136 ++++++++++++------------- linux-core/i810_drv.h | 14 +-- linux-core/i915_buffer.c | 10 +- linux-core/i915_fence.c | 10 +- linux-core/mga_drv.c | 4 +- linux-core/nouveau_drv.c | 2 +- linux-core/sis_drv.c | 4 +- linux-core/sis_mm.c | 14 +-- 
linux-core/via_buffer.c | 8 +- linux-core/via_dmablit.c | 20 ++-- linux-core/via_dmablit.h | 2 +- linux-core/via_fence.c | 8 +- linux-core/via_mm.c | 4 +- 39 files changed, 637 insertions(+), 637 deletions(-) (limited to 'linux-core') diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c index 524618a8..7241c2a8 100644 --- a/linux-core/ati_pcigart.c +++ b/linux-core/ati_pcigart.c @@ -81,9 +81,9 @@ static void drm_ati_free_pcigart_table(void *address, int order) free_pages((unsigned long)address, order); } -int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info) { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; unsigned long pages; int i; int order; @@ -132,9 +132,9 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) } EXPORT_SYMBOL(drm_ati_pcigart_cleanup); -int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info) { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; void *address = NULL; unsigned long pages; u32 *pci_gart, page_base, bus_address = 0; diff --git a/linux-core/drmP.h b/linux-core/drmP.h index cf2ed2ed..df7481fe 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -291,22 +291,22 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, #define DRM_MASTER 0x2 #define DRM_ROOT_ONLY 0x4 -typedef struct drm_ioctl_desc { +struct drm_ioctl_desc { drm_ioctl_t *func; int flags; -} drm_ioctl_desc_t; +}; -typedef struct drm_magic_entry { +struct drm_magic_entry { struct list_head head; struct drm_hash_item hash_item; struct drm_file *priv; -} drm_magic_entry_t; +}; -typedef struct drm_vma_entry { +struct drm_vma_entry { struct list_head head; struct vm_area_struct *vma; pid_t pid; -} drm_vma_entry_t; +}; /** * DMA buffer. 
@@ -340,7 +340,7 @@ typedef struct drm_buf { } drm_buf_t; /** bufs is one longer than it has to be */ -typedef struct drm_waitlist { +struct drm_waitlist { int count; /**< Number of possible buffers */ struct drm_buf **bufs; /**< List of pointers to buffers */ struct drm_buf **rp; /**< Read pointer */ @@ -348,9 +348,9 @@ typedef struct drm_waitlist { struct drm_buf **end; /**< End pointer */ spinlock_t read_lock; spinlock_t write_lock; -} drm_waitlist_t; +}; -typedef struct drm_freelist { +struct drm_freelist { int initialized; /**< Freelist in use */ atomic_t count; /**< Number of free buffers */ struct drm_buf *next; /**< End pointer */ @@ -360,7 +360,7 @@ typedef struct drm_freelist { int high_mark; /**< High water mark */ atomic_t wfh; /**< If waiting for high mark */ spinlock_t lock; -} drm_freelist_t; +}; typedef struct drm_dma_handle { dma_addr_t busaddr; @@ -371,7 +371,7 @@ typedef struct drm_dma_handle { /** * Buffer entry. There is one of this for each buffer size order. */ -typedef struct drm_buf_entry { +struct drm_buf_entry { int buf_size; /**< size */ int buf_count; /**< number of buffers */ struct drm_buf *buflist; /**< buffer list */ @@ -379,7 +379,7 @@ typedef struct drm_buf_entry { int page_order; struct drm_dma_handle **seglist; struct drm_freelist freelist; -} drm_buf_entry_t; +}; /* * This should be small enough to allow the use of kmalloc for hash tables @@ -395,7 +395,7 @@ typedef enum{ /** File private data */ -typedef struct drm_file { +struct drm_file { int authenticated; int master; int minor; @@ -420,10 +420,10 @@ typedef struct drm_file { drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES]; void *driver_priv; -} drm_file_t; +}; /** Wait queue */ -typedef struct drm_queue { +struct drm_queue { atomic_t use_count; /**< Outstanding uses (+1) */ atomic_t finalization; /**< Finalization in progress */ atomic_t block_count; /**< Count of processes waiting */ @@ -439,12 +439,12 @@ typedef struct drm_queue { enum drm_ctx_flags flags; /**< 
Context preserving and 2D-only */ struct drm_waitlist waitlist; /**< Pending buffers */ wait_queue_head_t flush_queue; /**< Processes waiting until flush */ -} drm_queue_t; +}; /** * Lock data. */ -typedef struct drm_lock_data { +struct drm_lock_data { struct drm_hw_lock *hw_lock; /**< Hardware lock */ struct file *filp; /**< File descr of lock holder (0=kernel) */ wait_queue_head_t lock_queue; /**< Queue of blocked processes */ @@ -453,12 +453,12 @@ typedef struct drm_lock_data { uint32_t kernel_waiters; uint32_t user_waiters; int idle_has_lock; -} drm_lock_data_t; +}; /** * DMA data. */ -typedef struct drm_device_dma { +struct drm_device_dma { struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ int buf_count; /**< total number of buffers */ @@ -474,25 +474,25 @@ typedef struct drm_device_dma { _DRM_DMA_USE_PCI_RO = 0x08 } flags; -} drm_device_dma_t; +}; /** * AGP memory entry. Stored as a doubly linked list. */ -typedef struct drm_agp_mem { +struct drm_agp_mem { unsigned long handle; /**< handle */ DRM_AGP_MEM *memory; unsigned long bound; /**< address */ int pages; struct list_head head; -} drm_agp_mem_t; +}; /** * AGP data. * * \sa drm_agp_init)() and drm_device::agp. */ -typedef struct drm_agp_head { +struct drm_agp_head { DRM_AGP_KERN agp_info; /**< AGP device information */ struct list_head memory; unsigned long mode; /**< AGP mode */ @@ -505,30 +505,30 @@ typedef struct drm_agp_head { int agp_mtrr; int cant_use_aperture; unsigned long page_mask; -} drm_agp_head_t; +}; /** * Scatter-gather memory. 
*/ -typedef struct drm_sg_mem { +struct drm_sg_mem { unsigned long handle; void *virtual; int pages; struct page **pagelist; dma_addr_t *busaddr; -} drm_sg_mem_t; +}; -typedef struct drm_sigdata { +struct drm_sigdata { int context; struct drm_hw_lock *lock; -} drm_sigdata_t; +}; /* * Generic memory manager structs */ -typedef struct drm_mm_node { +struct drm_mm_node { struct list_head fl_entry; struct list_head ml_entry; int free; @@ -536,42 +536,42 @@ typedef struct drm_mm_node { unsigned long size; struct drm_mm *mm; void *private; -} drm_mm_node_t; +}; -typedef struct drm_mm { +struct drm_mm { struct list_head fl_entry; struct list_head ml_entry; -} drm_mm_t; +}; /** * Mappings list */ -typedef struct drm_map_list { +struct drm_map_list { struct list_head head; /**< list head */ struct drm_hash_item hash; struct drm_map *map; /**< mapping */ drm_u64_t user_token; - drm_mm_node_t *file_offset_node; -} drm_map_list_t; + struct drm_mm_node *file_offset_node; +}; typedef struct drm_map drm_local_map_t; /** * Context handle list */ -typedef struct drm_ctx_list { +struct drm_ctx_list { struct list_head head; /**< list head */ drm_context_t handle; /**< context handle */ struct drm_file *tag; /**< associated fd private data */ -} drm_ctx_list_t; +}; -typedef struct drm_vbl_sig { +struct drm_vbl_sig { struct list_head head; unsigned int sequence; struct siginfo info; struct task_struct *task; -} drm_vbl_sig_t; +}; /* location of GART table */ #define DRM_ATI_GART_MAIN 1 @@ -581,14 +581,14 @@ typedef struct drm_vbl_sig { #define DRM_ATI_GART_PCIE 2 #define DRM_ATI_GART_IGP 3 -typedef struct ati_pcigart_info { +struct ati_pcigart_info { int gart_table_location; int gart_reg_if; void *addr; dma_addr_t bus_addr; drm_local_map_t mapping; int table_size; -} drm_ati_pcigart_info; +}; #include "drm_objects.h" @@ -602,9 +602,9 @@ struct drm_device; struct drm_driver { int (*load) (struct drm_device *, unsigned long flags); int (*firstopen) (struct drm_device *); - int (*open) 
(struct drm_device *, drm_file_t *); + int (*open) (struct drm_device *, struct drm_file *); void (*preclose) (struct drm_device *, struct file * filp); - void (*postclose) (struct drm_device *, drm_file_t *); + void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); int (*dma_ioctl) (DRM_IOCTL_ARGS); @@ -659,7 +659,7 @@ struct drm_driver { /* variables */ u32 driver_features; int dev_priv_size; - drm_ioctl_desc_t *ioctls; + struct drm_ioctl_desc *ioctls; int num_ioctls; struct file_operations fops; struct pci_driver pci_driver; @@ -670,20 +670,20 @@ struct drm_driver { * that may contain multiple heads. Embed one per head of these in the * private drm_device structure. */ -typedef struct drm_head { +struct drm_head { int minor; /**< Minor device number */ struct drm_device *dev; struct proc_dir_entry *dev_root; /**< proc directory entry */ dev_t device; /**< Device number for mknod */ struct class_device *dev_class; -} drm_head_t; +}; /** * DRM device structure. This structure represent a complete card that * may contain multiple heads. 
*/ -typedef struct drm_device { +struct drm_device { char *unique; /**< Unique identifier: e.g., busid */ int unique_len; /**< Length of unique field */ char *devname; /**< For /proc/interrupts */ @@ -725,8 +725,8 @@ typedef struct drm_device { struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ struct drm_open_hash map_hash; /**< User token hash table for maps */ - drm_mm_t offset_manager; /**< User token manager */ - drm_open_hash_t object_hash; /**< User token hash table for objects */ + struct drm_mm offset_manager; /**< User token manager */ + struct drm_open_hash object_hash; /**< User token hash table for objects */ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ struct page *ttm_dummy_page; @@ -814,15 +814,15 @@ typedef struct drm_device { spinlock_t drw_lock; struct idr drw_idr; /*@} */ -} drm_device_t; +}; #if __OS_HAS_AGP -typedef struct drm_agp_ttm_backend { +struct drm_agp_ttm_backend { drm_ttm_backend_t backend; DRM_AGP_MEM *mem; struct agp_bridge_data *bridge; int populated; -} drm_agp_ttm_backend_t; +}; #endif @@ -900,7 +900,7 @@ extern int drm_ioctl(struct inode *inode, struct file *filp, extern long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_lastclose(drm_device_t * dev); +extern int drm_lastclose(struct drm_device *dev); /* Device support (drm_fops.h) */ extern int drm_open(struct inode *inode, struct file *filp); @@ -924,7 +924,7 @@ extern void *drm_calloc(size_t nmemb, size_t size, int area); extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); extern unsigned long drm_alloc_pages(int order, int area); extern void drm_free_pages(unsigned long address, int order, int area); -extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type); +extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int 
drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); extern int drm_unbind_agp(DRM_AGP_MEM * handle); @@ -972,9 +972,9 @@ extern int drm_newctx(struct inode *inode, struct file *filp, extern int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_ctxbitmap_init(drm_device_t * dev); -extern void drm_ctxbitmap_cleanup(drm_device_t * dev); -extern void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle); +extern int drm_ctxbitmap_init(struct drm_device *dev); +extern void drm_ctxbitmap_cleanup(struct drm_device *dev); +extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); extern int drm_setsareactx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); @@ -988,9 +988,9 @@ extern int drm_rmdraw(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_update_drawable_info(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, +extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id); -extern void drm_drawable_free_all(drm_device_t *dev); +extern void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.h) */ extern int drm_getmagic(struct inode *inode, struct file *filp, @@ -1003,10 +1003,10 @@ extern int drm_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context); -extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context); -extern void drm_idlelock_take(drm_lock_data_t *lock_data); -extern void drm_idlelock_release(drm_lock_data_t *lock_data); +extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); +extern int 
drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); +extern void drm_idlelock_take(struct drm_lock_data *lock_data); +extern void drm_idlelock_release(struct drm_lock_data *lock_data); /* * These are exported to drivers so that they can implement fencing using @@ -1017,16 +1017,16 @@ extern int drm_i_have_hw_lock(struct file *filp); extern int drm_kernel_take_hw_lock(struct file *filp); /* Buffer management support (drm_bufs.h) */ -extern int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request); -extern int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request); -extern int drm_addbufs_fb (drm_device_t * dev, struct drm_buf_desc * request); -extern int drm_addmap(drm_device_t * dev, unsigned int offset, +extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addmap(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t ** map_ptr); extern int drm_addmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map); -extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map); +extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); +extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_addbufs(struct inode *inode, struct file *filp, @@ -1040,59 +1040,59 @@ extern int drm_freebufs(struct inode *inode, struct file *filp, extern int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_order(unsigned long size); -extern unsigned long 
drm_get_resource_start(drm_device_t *dev, +extern unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource); -extern unsigned long drm_get_resource_len(drm_device_t *dev, +extern unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource); -extern drm_map_list_t *drm_find_matching_map(drm_device_t *dev, - drm_local_map_t *map); +extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, + drm_local_map_t *map); /* DMA support (drm_dma.h) */ -extern int drm_dma_setup(drm_device_t * dev); -extern void drm_dma_takedown(drm_device_t * dev); -extern void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf); -extern void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp); +extern int drm_dma_setup(struct drm_device *dev); +extern void drm_dma_takedown(struct drm_device *dev); +extern void drm_free_buffer(struct drm_device *dev, drm_buf_t * buf); +extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp); /* IRQ support (drm_irq.h) */ extern int drm_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); -extern int drm_irq_uninstall(drm_device_t *dev); -extern void drm_driver_irq_preinstall(drm_device_t * dev); -extern void drm_driver_irq_postinstall(drm_device_t * dev); -extern void drm_driver_irq_uninstall(drm_device_t * dev); +extern int drm_irq_uninstall(struct drm_device *dev); +extern void drm_driver_irq_preinstall(struct drm_device *dev); +extern void drm_driver_irq_postinstall(struct drm_device *dev); +extern void drm_driver_irq_uninstall(struct drm_device *dev); extern int drm_wait_vblank(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq); -extern void drm_vbl_send_signals(drm_device_t * dev); -extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*)); +extern int 
drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); +extern void drm_vbl_send_signals(struct drm_device *dev); +extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); /* AGP/GART support (drm_agpsupport.h) */ -extern drm_agp_head_t *drm_agp_init(drm_device_t *dev); -extern int drm_agp_acquire(drm_device_t * dev); +extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); +extern int drm_agp_acquire(struct drm_device *dev); extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_release(drm_device_t *dev); +extern int drm_agp_release(struct drm_device *dev); extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_enable(drm_device_t *dev, struct drm_agp_mode mode); +extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_info(drm_device_t * dev, struct drm_agp_info *info); +extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_alloc(drm_device_t *dev, struct drm_agp_buffer *request); +extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_free(drm_device_t *dev, struct drm_agp_buffer *request); +extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_unbind(drm_device_t *dev, struct drm_agp_binding *request); +extern int drm_agp_unbind(struct drm_device *dev, struct 
drm_agp_binding *request); extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_agp_bind(drm_device_t *dev, struct drm_agp_binding *request); +extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) @@ -1107,18 +1107,18 @@ extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev); /* Stub support (drm_stub.h) */ extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); -extern int drm_put_dev(drm_device_t * dev); -extern int drm_put_head(drm_head_t * head); +extern int drm_put_dev(struct drm_device *dev); +extern int drm_put_head(struct drm_head * head); extern unsigned int drm_debug; /* 1 to enable debug output */ extern unsigned int drm_cards_limit; -extern drm_head_t **drm_heads; +extern struct drm_head **drm_heads; extern struct drm_sysfs_class *drm_class; extern struct proc_dir_entry *drm_proc_root; extern drm_local_map_t *drm_getsarea(struct drm_device *dev); /* Proc support (drm_proc.h) */ -extern int drm_proc_init(drm_device_t * dev, +extern int drm_proc_init(struct drm_device *dev, int minor, struct proc_dir_entry *root, struct proc_dir_entry **dev_root); @@ -1127,21 +1127,21 @@ extern int drm_proc_cleanup(int minor, struct proc_dir_entry *dev_root); /* Scatter Gather Support (drm_scatter.h) */ -extern void drm_sg_cleanup(drm_sg_mem_t * entry); +extern void drm_sg_cleanup(struct drm_sg_mem * entry); extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_sg_alloc(drm_device_t *dev, struct drm_scatter_gather * request); +extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); extern int drm_sg_free(struct inode *inode, struct file *filp, unsigned int 
cmd, unsigned long arg); /* ATI PCIGART support (ati_pcigart.h) */ -extern int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info); -extern int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info); -extern drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, +extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align, dma_addr_t maxaddr); -extern void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); -extern void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); +extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); +extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* sysfs support (drm_sysfs.c) */ struct drm_sysfs_class; @@ -1149,26 +1149,26 @@ extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name); extern void drm_sysfs_destroy(struct drm_sysfs_class *cs); extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - drm_head_t * head); + struct drm_head * head); extern void drm_sysfs_device_remove(struct class_device *class_dev); /* * Basic memory manager support (drm_mm.c) */ -extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size, +extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment); -extern void drm_mm_put_block(drm_mm_node_t *cur); -extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size, +extern void drm_mm_put_block(struct drm_mm_node *cur); +extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match); -extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size); -extern void 
drm_mm_takedown(drm_mm_t *mm); -extern int drm_mm_clean(drm_mm_t *mm); -extern unsigned long drm_mm_tail_space(drm_mm_t *mm); -extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size); -extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size); - -static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block) +extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); +extern void drm_mm_takedown(struct drm_mm *mm); +extern int drm_mm_clean(struct drm_mm *mm); +extern unsigned long drm_mm_tail_space(struct drm_mm *mm); +extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); +extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); + +static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) { return block->mm; } @@ -1179,14 +1179,14 @@ extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token) { - drm_map_list_t *_entry; + struct drm_map_list *_entry; list_for_each_entry(_entry, &dev->maplist, head) if (_entry->user_token == token) return _entry->map; return NULL; } -static __inline__ int drm_device_is_agp(drm_device_t *dev) +static __inline__ int drm_device_is_agp(struct drm_device *dev) { if ( dev->driver->device_is_agp != NULL ) { int err = (*dev->driver->device_is_agp)( dev ); @@ -1199,7 +1199,7 @@ static __inline__ int drm_device_is_agp(drm_device_t *dev) return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); } -static __inline__ int drm_device_is_pcie(drm_device_t *dev) +static __inline__ int drm_device_is_pcie(struct drm_device *dev) { return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); } diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index c037defe..541d95cd 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -48,7 +48,7 @@ * Verifies the AGP device has been initialized and acquired and 
fills in the * drm_agp_info structure with the information in drm_agp_head::agp_info. */ -int drm_agp_info(drm_device_t * dev, struct drm_agp_info *info) +int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info) { DRM_AGP_KERN *kern; @@ -73,8 +73,8 @@ EXPORT_SYMBOL(drm_agp_info); int drm_agp_info_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_info info; int err; @@ -96,7 +96,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. */ -int drm_agp_acquire(drm_device_t * dev) +int drm_agp_acquire(struct drm_device * dev) { #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) int retcode; @@ -134,9 +134,9 @@ EXPORT_SYMBOL(drm_agp_acquire); int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; - return drm_agp_acquire( (drm_device_t *) priv->head->dev ); + return drm_agp_acquire( (struct drm_device *) priv->head->dev ); } /** @@ -147,7 +147,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, * * Verifies the AGP device has been acquired and calls \c agp_backend_release. 
*/ -int drm_agp_release(drm_device_t *dev) +int drm_agp_release(struct drm_device *dev) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -165,8 +165,8 @@ EXPORT_SYMBOL(drm_agp_release); int drm_agp_release_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; return drm_agp_release(dev); } @@ -181,7 +181,7 @@ int drm_agp_release_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device has been acquired but not enabled, and calls * \c agp_enable. */ -int drm_agp_enable(drm_device_t *dev, struct drm_agp_mode mode) +int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -201,8 +201,8 @@ EXPORT_SYMBOL(drm_agp_enable); int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_mode mode; @@ -224,9 +224,9 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device is present and has been acquired, allocates the * memory via alloc_agp() and creates a drm_agp_mem entry for it. 
*/ -int drm_agp_alloc(drm_device_t *dev, struct drm_agp_buffer *request) +int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; DRM_AGP_MEM *memory; unsigned long pages; u32 type; @@ -262,8 +262,8 @@ EXPORT_SYMBOL(drm_agp_alloc); int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_buffer request; struct drm_agp_buffer __user *argp = (void __user *)arg; int err; @@ -276,7 +276,7 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, return err; if (copy_to_user(argp, &request, sizeof(request))) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; list_for_each_entry(entry, &dev->agp->memory, head) { if (entry->handle == request.handle) break; @@ -299,10 +299,10 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, * * Walks through drm_agp_head::memory until finding a matching handle. */ -static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, +static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev, unsigned long handle) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; list_for_each_entry(entry, &dev->agp->memory, head) { if (entry->handle == handle) @@ -323,9 +323,9 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, * Verifies the AGP device is present and acquired, looks-up the AGP memory * entry and passes it to the unbind_agp() function. 
*/ -int drm_agp_unbind(drm_device_t *dev, struct drm_agp_binding *request) +int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int ret; if (!dev->agp || !dev->agp->acquired) @@ -345,8 +345,8 @@ EXPORT_SYMBOL(drm_agp_unbind); int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_binding request; if (copy_from_user @@ -370,9 +370,9 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, * is currently bound into the GATT. Looks-up the AGP memory entry and passes * it to bind_agp() function. */ -int drm_agp_bind(drm_device_t *dev, struct drm_agp_binding *request) +int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int retcode; int page; @@ -396,8 +396,8 @@ EXPORT_SYMBOL(drm_agp_bind); int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_binding request; if (copy_from_user @@ -422,9 +422,9 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, * unbind_agp(). Frees it via free_agp() as well as the entry itself * and unlinks from the doubly linked list it's inserted in. 
*/ -int drm_agp_free(drm_device_t *dev, struct drm_agp_buffer *request) +int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -446,8 +446,8 @@ EXPORT_SYMBOL(drm_agp_free); int drm_agp_free_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_agp_buffer request; if (copy_from_user @@ -467,9 +467,9 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp, * via the inter_module_* functions. Creates and initializes a drm_agp_head * structure. */ -drm_agp_head_t *drm_agp_init(drm_device_t *dev) +struct drm_agp_head *drm_agp_init(struct drm_device *dev) { - drm_agp_head_t *head = NULL; + struct drm_agp_head *head = NULL; if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) return NULL; @@ -559,11 +559,11 @@ static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) { } -static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages, +static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages, struct page **pages) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); struct page **cur_page, **last_page = pages + num_pages; DRM_AGP_MEM *mem; @@ -594,8 +594,8 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, unsigned long offset, int cached) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; int ret; @@ -614,8 +614,8 @@ static int 
drm_agp_bind_ttm(drm_ttm_backend_t *backend, static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_DEBUG("drm_agp_unbind_ttm\n"); if (agp_be->mem->is_bound) @@ -626,8 +626,8 @@ static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; DRM_DEBUG("drm_agp_clear_ttm\n"); @@ -642,11 +642,11 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { - drm_agp_ttm_backend_t *agp_be; + struct drm_agp_ttm_backend *agp_be; if (backend) { DRM_DEBUG("drm_agp_destroy_ttm\n"); - agp_be = container_of(backend, drm_agp_ttm_backend_t, backend); + agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); if (agp_be) { if (agp_be->mem) { backend->func->clear(backend); @@ -666,10 +666,10 @@ static drm_ttm_backend_func_t agp_ttm_backend = .destroy = drm_agp_destroy_ttm, }; -drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev) +struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) { - drm_agp_ttm_backend_t *agp_be; + struct drm_agp_ttm_backend *agp_be; struct agp_kern_info *info; if (!dev->agp) { diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 4a80cf39..10d928ea 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -135,7 +135,7 @@ static void drm_bo_vm_post_move(drm_buffer_object_t * bo) static int drm_bo_add_ttm(drm_buffer_object_t * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; int ret = 0; bo->ttm = NULL; @@ -168,7 +168,7 @@ static int 
drm_bo_handle_move_mem(drm_buffer_object_t * bo, drm_bo_mem_reg_t * mem, int evict, int no_wait) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); int new_is_pci = drm_mem_reg_is_pci(dev, mem); @@ -294,7 +294,7 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; if (bo->fence) { @@ -329,7 +329,7 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -391,7 +391,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) static void drm_bo_destroy_locked(drm_buffer_object_t * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -438,7 +438,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) * Call dev->struct_mutex locked. 
*/ -static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) +static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) { drm_buffer_manager_t *bm = &dev->bm; @@ -470,12 +470,12 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) #endif { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; drm_buffer_manager_t *bm = &dev->bm; #else drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work); - drm_device_t *dev = container_of(bm, drm_device_t, bm); + struct drm_device *dev = container_of(bm, struct drm_device, bm); #endif DRM_DEBUG("Delayed delete Worker\n"); @@ -505,7 +505,7 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) } } -static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) +static void drm_bo_base_deref_locked(struct drm_file * priv, drm_user_object_t * uo) { drm_buffer_object_t *bo = drm_user_object_entry(uo, drm_buffer_object_t, base); @@ -519,7 +519,7 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) { struct drm_buffer_object *tmp_bo = *bo; - drm_device_t *dev = tmp_bo->dev; + struct drm_device *dev = tmp_bo->dev; *bo = NULL; if (atomic_dec_and_test(&tmp_bo->usage)) { @@ -535,13 +535,13 @@ static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) * and deregister fence object usage. 
*/ -int drm_fence_buffer_objects(drm_file_t * priv, +int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, drm_fence_object_t * fence, drm_fence_object_t ** used_fence) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *entry; @@ -639,7 +639,7 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, int no_wait) { int ret = 0; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_bo_mem_reg_t evict_mem; /* @@ -705,11 +705,11 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, return ret; } -static int drm_bo_mem_force_space(drm_device_t * dev, +static int drm_bo_mem_force_space(struct drm_device * dev, drm_bo_mem_reg_t * mem, uint32_t mem_type, int no_wait) { - drm_mm_node_t *node; + struct drm_mm_node *node; drm_buffer_manager_t *bm = &dev->bm; drm_buffer_object_t *entry; drm_mem_type_manager_t *man = &bm->man[mem_type]; @@ -794,7 +794,7 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, int drm_bo_mem_space(drm_buffer_object_t * bo, drm_bo_mem_reg_t * mem, int no_wait) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man; @@ -806,7 +806,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, int type_found = 0; int type_ok = 0; int has_eagain = 0; - drm_mm_node_t *node = NULL; + struct drm_mm_node *node = NULL; int ret; mem->mm_node = NULL; @@ -921,7 +921,7 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo, * Call dev->struct_mutex locked. */ -drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv, +drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, uint32_t handle, int check_owner) { drm_user_object_t *uo; @@ -1102,12 +1102,12 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, * unregistered. 
*/ -static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, +static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, uint32_t map_flags, unsigned hint, struct drm_bo_info_rep *rep) { drm_buffer_object_t *bo; - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret = 0; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1183,9 +1183,9 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, return ret; } -static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) +static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; drm_ref_object_t *ro; int ret = 0; @@ -1215,7 +1215,7 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) * Call struct-sem locked. */ -static void drm_buffer_user_object_unmap(drm_file_t * priv, +static void drm_buffer_user_object_unmap(struct drm_file * priv, drm_user_object_t * uo, drm_ref_t action) { @@ -1241,7 +1241,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; int ret = 0; drm_bo_mem_reg_t mem; @@ -1318,7 +1318,7 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) return 1; } -static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) +static int drm_bo_check_fake(struct drm_device * dev, drm_bo_mem_reg_t * mem) { drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man; @@ -1364,7 +1364,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, uint32_t fence_class, int move_unfenced, int no_wait) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; drm_bo_driver_t *driver = 
dev->driver->bo_driver; uint32_t ftype; @@ -1489,7 +1489,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return 0; } -static int drm_bo_handle_validate(drm_file_t * priv, +static int drm_bo_handle_validate(struct drm_file * priv, uint32_t handle, uint32_t fence_class, uint64_t flags, uint64_t mask, uint32_t hint, @@ -1532,7 +1532,7 @@ static int drm_bo_handle_validate(drm_file_t * priv, return ret; } -static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle, +static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; @@ -1554,7 +1554,7 @@ static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle, return 0; } -static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, +static int drm_bo_handle_wait(struct drm_file *priv, uint32_t handle, uint32_t hint, struct drm_bo_info_rep *rep) { @@ -1587,7 +1587,7 @@ static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle, return ret; } -int drm_buffer_object_create(drm_device_t *dev, +int drm_buffer_object_create(struct drm_device *dev, unsigned long size, enum drm_bo_type type, uint64_t mask, @@ -1672,10 +1672,10 @@ int drm_buffer_object_create(drm_device_t *dev, return ret; } -static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, +static int drm_bo_add_user_object(struct drm_file * priv, drm_buffer_object_t * bo, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -1693,7 +1693,7 @@ static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, return ret; } -static int drm_bo_lock_test(drm_device_t * dev, struct file *filp) +static int drm_bo_lock_test(struct drm_device * dev, struct file *filp) { LOCK_TEST_WITH_RETURN(dev, filp); return 0; @@ -1973,7 +1973,7 @@ int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) *Call dev->struct_sem locked. 
*/ -static void drm_bo_clean_unfenced(drm_device_t *dev) +static void drm_bo_clean_unfenced(struct drm_device *dev) { drm_buffer_manager_t *bm = &dev->bm; struct list_head *head, *list; @@ -2003,7 +2003,7 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, uint32_t mem_type, int free_pinned, int allow_errors) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; int ret = 0; mutex_lock(&bo->mutex); @@ -2063,7 +2063,7 @@ static drm_buffer_object_t *drm_bo_entry(struct list_head *list, * dev->struct_mutex locked. */ -static int drm_bo_force_list_clean(drm_device_t * dev, +static int drm_bo_force_list_clean(struct drm_device * dev, struct list_head *head, unsigned mem_type, int free_pinned, @@ -2128,7 +2128,7 @@ restart: return 0; } -int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) +int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) { drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man = &bm->man[mem_type]; @@ -2170,7 +2170,7 @@ int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) *point since we have the hardware lock. */ -static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) +static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) { int ret; drm_buffer_manager_t *bm = &dev->bm; @@ -2196,7 +2196,7 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) return ret; } -int drm_bo_init_mm(drm_device_t * dev, +int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size) { @@ -2245,7 +2245,7 @@ EXPORT_SYMBOL(drm_bo_init_mm); * any clients still running when we set the initialized flag to zero. 
*/ -int drm_bo_driver_finish(drm_device_t * dev) +int drm_bo_driver_finish(struct drm_device * dev) { drm_buffer_manager_t *bm = &dev->bm; int ret = 0; @@ -2296,7 +2296,7 @@ int drm_bo_driver_finish(drm_device_t * dev) return ret; } -int drm_bo_driver_init(drm_device_t * dev) +int drm_bo_driver_init(struct drm_device * dev) { drm_bo_driver_t *driver = dev->driver->bo_driver; drm_buffer_manager_t *bm = &dev->bm; @@ -2492,7 +2492,7 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. */ -int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem) +int drm_mem_reg_is_pci(struct drm_device * dev, drm_bo_mem_reg_t * mem) { drm_buffer_manager_t *bm = &dev->bm; drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; @@ -2526,7 +2526,7 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci); * Otherwise returns zero. */ -int drm_bo_pci_offset(drm_device_t * dev, +int drm_bo_pci_offset(struct drm_device * dev, drm_bo_mem_reg_t * mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size) @@ -2557,7 +2557,7 @@ int drm_bo_pci_offset(drm_device_t * dev, void drm_bo_unmap_virtual(drm_buffer_object_t * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; @@ -2569,9 +2569,9 @@ void drm_bo_unmap_virtual(drm_buffer_object_t * bo) static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) { - drm_map_list_t *list = &bo->map_list; + struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; DRM_ASSERT_LOCKED(&dev->struct_mutex); if (list->user_token) { @@ -2595,9 +2595,9 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) { - drm_map_list_t *list = &bo->map_list; + struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; - drm_device_t *dev = bo->dev; + 
struct drm_device *dev = bo->dev; DRM_ASSERT_LOCKED(&dev->struct_mutex); list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 8ef2a8ff..1e0d26ce 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -102,7 +102,7 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. */ -int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem, +int drm_mem_reg_ioremap(struct drm_device * dev, drm_bo_mem_reg_t * mem, void **virtual) { drm_buffer_manager_t *bm = &dev->bm; @@ -137,7 +137,7 @@ int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem, * Call bo->mutex locked. */ -void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem, +void drm_mem_reg_iounmap(struct drm_device * dev, drm_bo_mem_reg_t * mem, void *virtual) { drm_buffer_manager_t *bm; @@ -203,7 +203,7 @@ static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) int drm_bo_move_memcpy(drm_buffer_object_t * bo, int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; drm_ttm_t *ttm = bo->ttm; drm_bo_mem_reg_t *old_mem = &bo->mem; @@ -285,7 +285,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo, drm_buffer_object_t ** new_obj) { drm_buffer_object_t *fbo; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_buffer_manager_t *bm = &dev->bm; fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); @@ -330,7 +330,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, uint32_t fence_type, uint32_t fence_flags, drm_bo_mem_reg_t * new_mem) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; drm_bo_mem_reg_t *old_mem = &bo->mem; int ret; diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index 3f34de0e..75eeafdd 100644 --- 
a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -36,21 +36,21 @@ #include #include "drmP.h" -unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource) +unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource) { return pci_resource_start(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_start); -unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource) +unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource) { return pci_resource_len(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_len); -drm_map_list_t *drm_find_matching_map(drm_device_t *dev, drm_local_map_t *map) +struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map) { - drm_map_list_t *entry; + struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && map->type == entry->map->type && ((entry->map->offset == map->offset) || @@ -63,7 +63,7 @@ drm_map_list_t *drm_find_matching_map(drm_device_t *dev, drm_local_map_t *map) } EXPORT_SYMBOL(drm_find_matching_map); -static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, +static int drm_map_handle(struct drm_device *dev, drm_hash_item_t *hash, unsigned long user_token, int hashed_handle) { int use_hashed_handle; @@ -101,7 +101,7 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where * applicable and if supported by the kernel. 
*/ -static int drm_addmap_core(drm_device_t * dev, unsigned int offset, +static int drm_addmap_core(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, struct drm_map_list **maplist) @@ -213,7 +213,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, } break; case _DRM_AGP: { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int valid = 0; if (!drm_core_has_AGP(dev)) { @@ -311,7 +311,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, return 0; } -int drm_addmap(drm_device_t * dev, unsigned int offset, +int drm_addmap(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t ** map_ptr) { @@ -329,10 +329,10 @@ EXPORT_SYMBOL(drm_addmap); int drm_addmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map map; - drm_map_list_t *maplist; + struct drm_map_list *maplist; struct drm_map __user *argp = (void __user *)arg; int err; @@ -377,9 +377,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, * * \sa drm_addmap */ -int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) +int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) { - drm_map_list_t *r_list = NULL, *list_t; + struct drm_map_list *r_list = NULL, *list_t; drm_dma_handle_t dmah; int found = 0; @@ -434,7 +434,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) } EXPORT_SYMBOL(drm_rmmap_locked); -int drm_rmmap(drm_device_t *dev, drm_local_map_t *map) +int drm_rmmap(struct drm_device *dev, drm_local_map_t *map) { int ret; @@ -458,11 +458,11 @@ EXPORT_SYMBOL(drm_rmmap); int drm_rmmap_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t 
*priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map request; drm_local_map_t *map = NULL; - drm_map_list_t *r_list; + struct drm_map_list *r_list; int ret; if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) { @@ -513,7 +513,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp, * * Frees any pages and buffers associated with the given entry. */ -static void drm_cleanup_buf_error(drm_device_t * dev, struct drm_buf_entry * entry) +static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry * entry) { int i; @@ -550,7 +550,7 @@ static void drm_cleanup_buf_error(drm_device_t * dev, struct drm_buf_entry * ent /** * Add AGP buffers for DMA transfers * - * \param dev drm_device_t to which the buffers are to be added. + * \param dev struct drm_device to which the buffers are to be added. * \param request pointer to a struct drm_buf_desc describing the request. * \return zero on success or a negative number on failure. * @@ -558,12 +558,12 @@ static void drm_cleanup_buf_error(drm_device_t * dev, struct drm_buf_entry * ent * reallocates the buffer list of the same size order to accommodate the new * buffers. 
*/ -int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request) +int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; - drm_agp_mem_t *agp_entry; - drm_buf_t *buf; + struct drm_agp_mem *agp_entry; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -728,9 +728,9 @@ int drm_addbufs_agp(drm_device_t * dev, struct drm_buf_desc * request) EXPORT_SYMBOL(drm_addbufs_agp); #endif /* __OS_HAS_AGP */ -int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request) +int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int count; int order; int size; @@ -954,9 +954,9 @@ int drm_addbufs_pci(drm_device_t * dev, struct drm_buf_desc * request) } EXPORT_SYMBOL(drm_addbufs_pci); -static int drm_addbufs_sg(drm_device_t * dev, struct drm_buf_desc * request) +static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; drm_buf_t *buf; unsigned long offset; @@ -1116,9 +1116,9 @@ static int drm_addbufs_sg(drm_device_t * dev, struct drm_buf_desc * request) return 0; } -int drm_addbufs_fb(drm_device_t * dev, struct drm_buf_desc * request) +int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; drm_buf_t *buf; unsigned long offset; @@ -1296,8 +1296,8 @@ int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { struct drm_buf_desc request; - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int ret; if 
(!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1348,9 +1348,9 @@ int drm_addbufs(struct inode *inode, struct file *filp, int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; struct drm_buf_info request; struct drm_buf_info __user *argp = (void __user *)arg; int i; @@ -1386,7 +1386,7 @@ int drm_infobufs(struct inode *inode, struct file *filp, struct drm_buf_desc __user *to = &request.list[count]; struct drm_buf_entry *from = &dma->bufs[i]; - drm_freelist_t *list = &dma->bufs[i].freelist; + struct drm_freelist *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, &from->buf_count, sizeof(from->buf_count)) || @@ -1436,9 +1436,9 @@ int drm_infobufs(struct inode *inode, struct file *filp, int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; struct drm_buf_desc request; int order; struct drm_buf_entry *entry; @@ -1486,9 +1486,9 @@ int drm_markbufs(struct inode *inode, struct file *filp, int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; struct drm_buf_free request; int i; int idx; @@ -1542,9 +1542,9 @@ int drm_freebufs(struct inode *inode, struct file *filp, int drm_mapbufs(struct inode *inode, struct 
file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; struct drm_buf_map __user *argp = (void __user *)arg; int retcode = 0; const int zero = 0; diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 6f066ac4..95d28898 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -56,7 +56,7 @@ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex * lock. */ -void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) +void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle) { mutex_lock(&dev->struct_mutex); idr_remove(&dev->ctx_idr, ctx_handle); @@ -72,7 +72,7 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) * Allocate a new idr from drm_device::ctx_idr while holding the * drm_device::struct_mutex lock. */ -static int drm_ctxbitmap_next(drm_device_t * dev) +static int drm_ctxbitmap_next(struct drm_device *dev) { int new_id; int ret; @@ -101,7 +101,7 @@ again: * * Initialise the drm_device::ctx_idr */ -int drm_ctxbitmap_init(drm_device_t * dev) +int drm_ctxbitmap_init(struct drm_device *dev) { idr_init(&dev->ctx_idr); return 0; @@ -115,7 +115,7 @@ int drm_ctxbitmap_init(drm_device_t * dev) * Free all idr members using drm_ctx_sarea_free helper function * while holding the drm_device::struct_mutex lock. 
*/ -void drm_ctxbitmap_cleanup(drm_device_t * dev) +void drm_ctxbitmap_cleanup(struct drm_device *dev) { mutex_lock(&dev->struct_mutex); idr_remove_all(&dev->ctx_idr); @@ -143,8 +143,8 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev) int drm_getsareactx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx_priv_map __user *argp = (void __user *)arg; struct drm_ctx_priv_map request; struct drm_map *map; @@ -194,8 +194,8 @@ int drm_getsareactx(struct inode *inode, struct file *filp, int drm_setsareactx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx_priv_map request; struct drm_map *map = NULL; struct drm_map_list *r_list = NULL; @@ -243,7 +243,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp, * * Attempt to set drm_device::context_flag. */ -static int drm_context_switch(drm_device_t * dev, int old, int new) +static int drm_context_switch(struct drm_device *dev, int old, int new) { if (test_and_set_bit(0, &dev->context_flag)) { DRM_ERROR("Reentering -- FIXME\n"); @@ -271,7 +271,7 @@ static int drm_context_switch(drm_device_t * dev, int old, int new) * hardware lock is held, clears the drm_device::context_flag and wakes up * drm_device::context_wait. */ -static int drm_context_switch_complete(drm_device_t * dev, int new) +static int drm_context_switch_complete(struct drm_device *dev, int new) { dev->last_context = new; /* PRE/POST: This is the _only_ writer. 
*/ dev->last_switch = jiffies; @@ -338,7 +338,7 @@ int drm_resctx(struct inode *inode, struct file *filp, int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->head->dev; struct drm_ctx_list *ctx_entry; struct drm_ctx __user *argp = (void __user *)arg; @@ -434,8 +434,8 @@ int drm_getctx(struct inode *inode, struct file *filp, int drm_switchctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -459,8 +459,8 @@ int drm_switchctx(struct inode *inode, struct file *filp, int drm_newctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -486,8 +486,8 @@ int drm_newctx(struct inode *inode, struct file *filp, int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -505,7 +505,7 @@ int drm_rmctx(struct inode *inode, struct file *filp, mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { - drm_ctx_list_t *pos, *n; + struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { if (pos->handle == ctx.handle) { diff 
--git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index a7eee1a4..6990f8d4 100644 --- a/linux-core/drm_dma.c +++ b/linux-core/drm_dma.c @@ -43,7 +43,7 @@ * * Allocate and initialize a drm_device_dma structure. */ -int drm_dma_setup(drm_device_t * dev) +int drm_dma_setup(struct drm_device * dev) { int i; @@ -67,9 +67,9 @@ int drm_dma_setup(drm_device_t * dev) * Free all pages associated with DMA buffers, the buffers and pages lists, and * finally the the drm_device::dma structure itself. */ -void drm_dma_takedown(drm_device_t * dev) +void drm_dma_takedown(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i, j; if (!dma) @@ -129,7 +129,7 @@ void drm_dma_takedown(drm_device_t * dev) * * Resets the fields of \p buf. */ -void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf) +void drm_free_buffer(struct drm_device * dev, drm_buf_t * buf) { if (!buf) return; @@ -152,9 +152,9 @@ void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf) * * Frees each buffer associated with \p filp not already on the hardware. */ -void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp) +void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma) diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 5a2a14f9..d6cdba56 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -177,7 +177,7 @@ error: /** * Caller must hold the drawable spinlock! 
*/ -struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) +struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) { return idr_find(&dev->drw_idr, id); } @@ -196,7 +196,7 @@ static int drm_drawable_free(int idr, void *p, void *data) return 0; } -void drm_drawable_free_all(drm_device_t *dev) +void drm_drawable_free_all(struct drm_device *dev) { idr_for_each(&dev->drw_idr, drm_drawable_free, NULL); idr_remove_all(&dev->drw_idr); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index fd817f88..84efbfe7 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -48,14 +48,14 @@ #include "drmP.h" #include "drm_core.h" -static void drm_cleanup(drm_device_t * dev); +static void drm_cleanup(struct drm_device * dev); int drm_fb_loaded = 0; static int drm_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); /** Ioctl table */ -static drm_ioctl_desc_t drm_ioctls[] = { +static struct drm_ioctl_desc drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, @@ -168,11 +168,11 @@ static drm_ioctl_desc_t drm_ioctls[] = { * * \sa drm_device */ -int drm_lastclose(drm_device_t * dev) +int drm_lastclose(struct drm_device * dev) { - drm_magic_entry_t *pt, *next; - drm_map_list_t *r_list, *list_t; - drm_vma_entry_t *vma, *vma_temp; + struct drm_magic_entry *pt, *next; + struct drm_map_list *r_list, *list_t; + struct drm_vma_entry *vma, *vma_temp; int i; DRM_DEBUG("\n"); @@ -220,7 +220,7 @@ int drm_lastclose(drm_device_t * dev) /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp) { - drm_agp_mem_t *entry, *tempe; + struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp intact until drv_cleanup is called. 
*/ @@ -288,7 +288,7 @@ int drm_lastclose(drm_device_t * dev) void drm_cleanup_pci(struct pci_dev *pdev) { - drm_device_t *dev = pci_get_drvdata(pdev); + struct drm_device *dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); @@ -374,7 +374,7 @@ EXPORT_SYMBOL(drm_init); * * \sa drm_init */ -static void drm_cleanup(drm_device_t * dev) +static void drm_cleanup(struct drm_device * dev) { DRM_DEBUG("\n"); @@ -419,8 +419,8 @@ static void drm_cleanup(drm_device_t * dev) void drm_exit(struct drm_driver *driver) { int i; - drm_device_t *dev = NULL; - drm_head_t *head; + struct drm_device *dev = NULL; + struct drm_head *head; DRM_DEBUG("\n"); if (drm_fb_loaded) { @@ -548,8 +548,8 @@ module_exit(drm_core_exit); static int drm_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_version __user *argp = (void __user *)arg; struct drm_version version; int len; @@ -584,9 +584,9 @@ static int drm_version(struct inode *inode, struct file *filp, int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ioctl_desc_t *ioctl; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; @@ -635,7 +635,7 @@ EXPORT_SYMBOL(drm_ioctl); drm_local_map_t *drm_getsarea(struct drm_device *dev) { - drm_map_list_t *entry; + struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && entry->map->type == _DRM_SHM && diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index ccd9b19c..4f24b4b5 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ 
-34,7 +34,7 @@ * Typically called by the IRQ handler. */ -void drm_fence_handler(drm_device_t * dev, uint32_t class, +void drm_fence_handler(struct drm_device * dev, uint32_t class, uint32_t sequence, uint32_t type) { int wake = 0; @@ -114,7 +114,7 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class, EXPORT_SYMBOL(drm_fence_handler); -static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) +static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) { drm_fence_manager_t *fm = &dev->fm; unsigned long flags; @@ -180,7 +180,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, } -static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base) +static void drm_fence_object_destroy(struct drm_file *priv, drm_user_object_t * base) { drm_fence_object_t *fence = drm_user_object_entry(base, drm_fence_object_t, base); @@ -262,7 +262,7 @@ int drm_fence_object_flush(drm_fence_object_t * fence, * wrapped around and reused. 
*/ -void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) +void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence) { drm_fence_manager_t *fm = &dev->fm; drm_fence_class_manager_t *fc = &fm->class[class]; @@ -435,7 +435,7 @@ int drm_fence_object_emit(drm_fence_object_t * fence, return 0; } -static int drm_fence_object_init(drm_device_t * dev, uint32_t class, +static int drm_fence_object_init(struct drm_device * dev, uint32_t class, uint32_t type, uint32_t fence_flags, drm_fence_object_t * fence) @@ -471,10 +471,10 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class, return ret; } -int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, +int drm_fence_add_user_object(struct drm_file * priv, drm_fence_object_t * fence, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -491,7 +491,7 @@ out: } EXPORT_SYMBOL(drm_fence_add_user_object); -int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, +int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type, unsigned flags, drm_fence_object_t ** c_fence) { drm_fence_object_t *fence; @@ -514,7 +514,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, EXPORT_SYMBOL(drm_fence_object_create); -void drm_fence_manager_init(drm_device_t * dev) +void drm_fence_manager_init(struct drm_device * dev) { drm_fence_manager_t *fm = &dev->fm; drm_fence_class_manager_t *class; @@ -544,13 +544,13 @@ void drm_fence_manager_init(drm_device_t * dev) write_unlock(&fm->lock); } -void drm_fence_manager_takedown(drm_device_t * dev) +void drm_fence_manager_takedown(struct drm_device * dev) { } -drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) +drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) { - drm_device_t *dev = 
priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_user_object_t *uo; drm_fence_object_t *fence; diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index e54d5079..5ea3f9cf 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -39,9 +39,9 @@ #include static int drm_open_helper(struct inode *inode, struct file *filp, - drm_device_t * dev); + struct drm_device * dev); -static int drm_setup(drm_device_t * dev) +static int drm_setup(struct drm_device * dev) { drm_local_map_t *map; int i; @@ -128,7 +128,7 @@ static int drm_setup(drm_device_t * dev) */ int drm_open(struct inode *inode, struct file *filp) { - drm_device_t *dev = NULL; + struct drm_device *dev = NULL; int minor = iminor(inode); int retcode = 0; @@ -176,7 +176,7 @@ EXPORT_SYMBOL(drm_open); */ int drm_stub_open(struct inode *inode, struct file *filp) { - drm_device_t *dev = NULL; + struct drm_device *dev = NULL; int minor = iminor(inode); int err = -ENODEV; const struct file_operations *old_fops; @@ -232,10 +232,10 @@ static int drm_cpu_valid(void) * filp and add it into the double linked list in \p dev. */ static int drm_open_helper(struct inode *inode, struct file *filp, - drm_device_t * dev) + struct drm_device * dev) { int minor = iminor(inode); - drm_file_t *priv; + struct drm_file *priv; int ret; int i,j; @@ -320,8 +320,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp, /** No-op. 
*/ int drm_fasync(int fd, struct file *filp, int on) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int retcode; DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, @@ -335,7 +335,7 @@ EXPORT_SYMBOL(drm_fasync); static void drm_object_release(struct file *filp) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct list_head *head; drm_user_object_t *user_object; drm_ref_object_t *ref_object; @@ -386,8 +386,8 @@ static void drm_object_release(struct file *filp) { */ int drm_release(struct inode *inode, struct file *filp) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; int retcode = 0; lock_kernel(); @@ -466,7 +466,7 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { - drm_ctx_list_t *pos, *n; + struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { if (pos->tag == priv && @@ -488,7 +488,7 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->struct_mutex); drm_object_release(filp); if (priv->remove_auth_on_close == 1) { - drm_file_t *temp; + struct drm_file *temp; list_for_each_entry(temp, &dev->filelist, lhead) temp->authenticated = 0; diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 02f70243..a7bacbb8 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -52,8 +52,8 @@ int drm_getunique(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_unique __user *argp = (void __user *)arg; struct drm_unique u; @@ -86,8 +86,8 @@ int drm_getunique(struct inode *inode, struct 
file *filp, int drm_setunique(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_unique u; int domain, bus, slot, func, ret; @@ -134,7 +134,7 @@ int drm_setunique(struct inode *inode, struct file *filp, return 0; } -static int drm_set_busid(drm_device_t * dev) +static int drm_set_busid(struct drm_device * dev) { int len; if (dev->unique != NULL) @@ -179,8 +179,8 @@ static int drm_set_busid(drm_device_t * dev) int drm_getmap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map __user *argp = (void __user *)arg; struct drm_map map; struct drm_map_list *r_list = NULL; @@ -201,7 +201,7 @@ int drm_getmap(struct inode *inode, struct file *filp, i = 0; list_for_each(list, &dev->maplist) { if (i == idx) { - r_list = list_entry(list, drm_map_list_t, head); + r_list = list_entry(list, struct drm_map_list, head); break; } i++; @@ -240,11 +240,11 @@ int drm_getmap(struct inode *inode, struct file *filp, int drm_getclient(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_client __user *argp = (struct drm_client __user *)arg; struct drm_client client; - drm_file_t *pt; + struct drm_file *pt; int idx; int i; @@ -289,8 +289,8 @@ int drm_getclient(struct inode *inode, struct file *filp, int drm_getstats(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + 
struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_stats stats; int i; diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 2e2c4d9c..140ceca6 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -53,8 +53,8 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_irq_busid __user *argp = (void __user *)arg; struct drm_irq_busid p; @@ -86,7 +86,7 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions * before and after the installation. */ -static int drm_irq_install(drm_device_t * dev) +static int drm_irq_install(struct drm_device * dev) { int ret; unsigned long sh_flags = 0; @@ -154,7 +154,7 @@ static int drm_irq_install(drm_device_t * dev) * * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. 
*/ -int drm_irq_uninstall(drm_device_t * dev) +int drm_irq_uninstall(struct drm_device * dev) { int irq_enabled; @@ -195,8 +195,8 @@ EXPORT_SYMBOL(drm_irq_uninstall); int drm_control(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_control ctl; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ @@ -242,8 +242,8 @@ int drm_control(struct inode *inode, struct file *filp, */ int drm_wait_vblank(DRM_IOCTL_ARGS) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; union drm_wait_vblank __user *argp = (void __user *)data; union drm_wait_vblank vblwait; struct timeval now; @@ -292,7 +292,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) unsigned long irqflags; struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_sigs2 : &dev->vbl_sigs; - drm_vbl_sig_t *vbl_sig; + struct drm_vbl_sig *vbl_sig; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -322,7 +322,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) if (! (vbl_sig = - drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) { + drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) { return -ENOMEM; } @@ -369,7 +369,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) * * If a signal is not requested, then calls vblank_wait(). */ -void drm_vbl_send_signals(drm_device_t * dev) +void drm_vbl_send_signals(struct drm_device * dev) { unsigned long flags; int i; @@ -377,7 +377,7 @@ void drm_vbl_send_signals(drm_device_t * dev) spin_lock_irqsave(&dev->vbl_lock, flags); for (i = 0; i < 2; i++) { - drm_vbl_sig_t *vbl_sig, *tmp; + struct drm_vbl_sig *vbl_sig, *tmp; struct list_head *vbl_sigs = i ? 
&dev->vbl_sigs2 : &dev->vbl_sigs; unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 : &dev->vbl_received); @@ -413,7 +413,7 @@ EXPORT_SYMBOL(drm_vbl_send_signals); */ static void drm_locked_tasklet_func(unsigned long data) { - drm_device_t *dev = (drm_device_t*)data; + struct drm_device *dev = (struct drm_device*)data; unsigned long irqflags; spin_lock_irqsave(&dev->tasklet_lock, irqflags); @@ -450,7 +450,7 @@ static void drm_locked_tasklet_func(unsigned long data) * context, it must not make any assumptions about this. Also, the HW lock will * be held with the kernel context or any client context. */ -void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*)) +void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device*)) { unsigned long irqflags; static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0); diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index 6d348251..1ba01aab 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -51,8 +51,8 @@ static int drm_notifier(void *priv); int drm_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; DECLARE_WAITQUEUE(entry, current); struct drm_lock lock; int ret = 0; @@ -152,8 +152,8 @@ int drm_lock(struct inode *inode, struct file *filp, int drm_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_lock lock; unsigned long irqflags; @@ -202,7 +202,7 @@ int drm_unlock(struct inode *inode, struct file *filp, * * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. 
*/ -int drm_lock_take(drm_lock_data_t *lock_data, +int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; @@ -252,7 +252,7 @@ int drm_lock_take(drm_lock_data_t *lock_data, * Resets the lock file pointer. * Marks the lock as held by the given context, via the \p cmpxchg instruction. */ -static int drm_lock_transfer(drm_lock_data_t *lock_data, +static int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; @@ -278,7 +278,7 @@ static int drm_lock_transfer(drm_lock_data_t *lock_data, * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task * waiting on the lock queue. */ -int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context) +int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; @@ -320,7 +320,7 @@ int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context) */ static int drm_notifier(void *priv) { - drm_sigdata_t *s = (drm_sigdata_t *) priv; + struct drm_sigdata *s = (struct drm_sigdata *) priv; unsigned int old, new, prev; /* Allow signal delivery if lock isn't held */ @@ -351,7 +351,7 @@ static int drm_notifier(void *priv) * having to worry about starvation. 
*/ -void drm_idlelock_take(drm_lock_data_t *lock_data) +void drm_idlelock_take(struct drm_lock_data *lock_data) { int ret = 0; @@ -370,7 +370,7 @@ void drm_idlelock_take(drm_lock_data_t *lock_data) } EXPORT_SYMBOL(drm_idlelock_take); -void drm_idlelock_release(drm_lock_data_t *lock_data) +void drm_idlelock_release(struct drm_lock_data *lock_data) { unsigned int old, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index b1423c12..454c33e8 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -214,7 +214,7 @@ void drm_free_pages(unsigned long address, int order, int area) #if __OS_HAS_AGP static void *agp_remap(unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE; @@ -258,12 +258,12 @@ static void *agp_remap(unsigned long offset, unsigned long size, /** Wrapper around agp_allocate_memory() */ #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { return drm_agp_allocate_memory(pages, type); } #else -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { return drm_agp_allocate_memory(dev->agp->bridge, pages, type); } @@ -289,7 +289,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle) #else /* __OS_HAS_AGP*/ static void *agp_remap(unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { return NULL; } diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c index 2caf596b..cf0d92fa 100644 --- a/linux-core/drm_mm.c +++ b/linux-core/drm_mm.c @@ -44,26 +44,26 @@ #include "drmP.h" #include -unsigned long drm_mm_tail_space(drm_mm_t *mm) +unsigned long drm_mm_tail_space(struct drm_mm *mm) { struct list_head *tail_node; - 
drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return 0; return entry->size; } -int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size) +int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return -ENOMEM; @@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size) } -static int drm_mm_create_tail_node(drm_mm_t *mm, +static int drm_mm_create_tail_node(struct drm_mm *mm, unsigned long start, unsigned long size) { - drm_mm_node_t *child; + struct drm_mm_node *child; - child = (drm_mm_node_t *) + child = (struct drm_mm_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return -ENOMEM; @@ -98,13 +98,13 @@ static int drm_mm_create_tail_node(drm_mm_t *mm, } -int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size) +int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) { return drm_mm_create_tail_node(mm, entry->start + entry->size, size); } @@ -112,12 +112,12 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size) return 0; } -static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent, +static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, unsigned long size) { - drm_mm_node_t *child; + struct drm_mm_node *child; - child = (drm_mm_node_t *) + child = (struct drm_mm_node *) 
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return NULL; @@ -137,12 +137,12 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent, return child; } -drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, +struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment) { - drm_mm_node_t *align_splitoff = NULL; - drm_mm_node_t *child; + struct drm_mm_node *align_splitoff = NULL; + struct drm_mm_node *child; unsigned tmp = 0; if (alignment) @@ -173,26 +173,26 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, * Otherwise add to the free stack. */ -void drm_mm_put_block(drm_mm_node_t * cur) +void drm_mm_put_block(struct drm_mm_node * cur) { - drm_mm_t *mm = cur->mm; + struct drm_mm *mm = cur->mm; struct list_head *cur_head = &cur->ml_entry; struct list_head *root_head = &mm->ml_entry; - drm_mm_node_t *prev_node = NULL; - drm_mm_node_t *next_node; + struct drm_mm_node *prev_node = NULL; + struct drm_mm_node *next_node; int merged = 0; if (cur_head->prev != root_head) { - prev_node = list_entry(cur_head->prev, drm_mm_node_t, ml_entry); + prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); if (prev_node->free) { prev_node->size += cur->size; merged = 1; } } if (cur_head->next != root_head) { - next_node = list_entry(cur_head->next, drm_mm_node_t, ml_entry); + next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); if (next_node->free) { if (merged) { prev_node->size += next_node->size; @@ -217,14 +217,14 @@ void drm_mm_put_block(drm_mm_node_t * cur) } EXPORT_SYMBOL(drm_mm_put_block); -drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, +struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, unsigned long size, unsigned alignment, int best_match) { struct list_head *list; const struct list_head *free_stack = &mm->fl_entry; - drm_mm_node_t *entry; - drm_mm_node_t *best; + struct drm_mm_node *entry; + struct drm_mm_node *best; unsigned long best_size; 
unsigned wasted; @@ -232,7 +232,7 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, best_size = ~0UL; list_for_each(list, free_stack) { - entry = list_entry(list, drm_mm_node_t, fl_entry); + entry = list_entry(list, struct drm_mm_node, fl_entry); wasted = 0; if (entry->size < size) @@ -258,14 +258,14 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, return best; } -int drm_mm_clean(drm_mm_t * mm) +int drm_mm_clean(struct drm_mm * mm) { struct list_head *head = &mm->ml_entry; return (head->next->next == head); } -int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) +int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { INIT_LIST_HEAD(&mm->ml_entry); INIT_LIST_HEAD(&mm->fl_entry); @@ -275,12 +275,12 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) EXPORT_SYMBOL(drm_mm_init); -void drm_mm_takedown(drm_mm_t * mm) +void drm_mm_takedown(struct drm_mm * mm) { struct list_head *bnode = mm->fl_entry.next; - drm_mm_node_t *entry; + struct drm_mm_node *entry; - entry = list_entry(bnode, drm_mm_node_t, fl_entry); + entry = list_entry(bnode, struct drm_mm_node, fl_entry); if (entry->ml_entry.next != &mm->ml_entry || entry->fl_entry.next != &mm->fl_entry) { diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 567a7d2b..3c60605c 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -30,10 +30,10 @@ #include "drmP.h" -int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, +int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -51,9 +51,9 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, return 0; } -drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) +drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key) { - 
drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_hash_item_t *hash; int ret; drm_user_object_t *item; @@ -77,9 +77,9 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) return item; } -static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) +static void drm_deref_user_object(struct drm_file * priv, drm_user_object_t * item) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; if (atomic_dec_and_test(&item->refcount)) { @@ -90,7 +90,7 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) } } -int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) +int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item) { DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -105,7 +105,7 @@ int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) return 0; } -static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro, +static int drm_object_ref_action(struct drm_file * priv, drm_user_object_t * ro, drm_ref_t action) { int ret = 0; @@ -124,7 +124,7 @@ static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro, return ret; } -int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, +int drm_add_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, drm_ref_t ref_action) { int ret = 0; @@ -181,7 +181,7 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, return ret; } -drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, +drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, drm_ref_t ref_action) { @@ -197,7 +197,7 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, return drm_hash_entry(hash, drm_ref_object_t, hash); } -static void drm_remove_other_references(drm_file_t * priv, +static void 
drm_remove_other_references(struct drm_file * priv, drm_user_object_t * ro) { int i; @@ -214,7 +214,7 @@ static void drm_remove_other_references(drm_file_t * priv, } } -void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) +void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item) { int ret; drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key; @@ -244,10 +244,10 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) } -int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, +int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, drm_object_type_t type, drm_user_object_t ** object) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_user_object_t *uo; drm_hash_item_t *hash; int ret; @@ -274,10 +274,10 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, return ret; } -int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, +int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, drm_object_type_t type) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; drm_user_object_t *uo; drm_ref_object_t *ro; int ret; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 4bd9047c..cfca5bf0 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -61,13 +61,13 @@ typedef struct drm_user_object { drm_object_type_t type; atomic_t refcount; int shareable; - drm_file_t *owner; - void (*ref_struct_locked) (drm_file_t * priv, + struct drm_file *owner; + void (*ref_struct_locked) (struct drm_file * priv, struct drm_user_object * obj, drm_ref_t ref_action); - void (*unref) (drm_file_t * priv, struct drm_user_object * obj, + void (*unref) (struct drm_file * priv, struct drm_user_object * obj, drm_ref_t unref_action); - void (*remove) (drm_file_t * priv, struct drm_user_object * obj); + void (*remove) (struct drm_file * priv, struct drm_user_object * obj); } 
drm_user_object_t; /* @@ -88,13 +88,13 @@ typedef struct drm_ref_object { * Must be called with the struct_mutex held. */ -extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, +extern int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, int shareable); /** * Must be called with the struct_mutex held. */ -extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, +extern drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key); /* @@ -104,13 +104,13 @@ extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, * This function may temporarily release the struct_mutex. */ -extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item); +extern int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item); /* * Must be called with the struct_mutex held. May temporarily release it. */ -extern int drm_add_ref_object(drm_file_t * priv, +extern int drm_add_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, drm_ref_t ref_action); @@ -118,7 +118,7 @@ extern int drm_add_ref_object(drm_file_t * priv, * Must be called with the struct_mutex held. */ -drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, +drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, drm_ref_t ref_action); /* @@ -128,11 +128,11 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, * This function may temporarily release the struct_mutex. 
*/ -extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item); -extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, +extern void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item); +extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, drm_object_type_t type, drm_user_object_t ** object); -extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, +extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, drm_object_type_t type); /*************************************************** @@ -210,7 +210,7 @@ extern int drm_fence_object_wait(drm_fence_object_t * fence, extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, drm_fence_object_t ** c_fence); -extern int drm_fence_add_user_object(drm_file_t * priv, +extern int drm_fence_add_user_object(struct drm_file * priv, drm_fence_object_t * fence, int shareable); extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); @@ -317,7 +317,7 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm); */ typedef struct drm_bo_mem_reg { - drm_mm_node_t *mm_node; + struct drm_mm_node *mm_node; unsigned long size; unsigned long num_pages; uint32_t page_alignment; @@ -353,14 +353,14 @@ typedef struct drm_buffer_object { struct mutex mutex; /* For pinned buffers */ - drm_mm_node_t *pinned_node; + struct drm_mm_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; /* For vm */ drm_ttm_t *ttm; - drm_map_list_t map_list; + struct drm_map_list map_list; uint32_t memory_type; unsigned long bus_offset; uint32_t vm_flags; @@ -380,7 +380,7 @@ typedef struct drm_buffer_object { typedef struct drm_mem_type_manager { int has_type; int use_type; - drm_mm_t manager; + struct drm_mm manager; struct list_head lru; struct list_head pinned; uint32_t flags; @@ -403,7 +403,7 @@ typedef struct drm_buffer_manager { struct mutex evict_mutex; int nice_mode; int initialized; - 
drm_file_t *last_to_validate; + struct drm_file *last_to_validate; drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; struct list_head unfenced; struct list_head ddestroy; @@ -462,7 +462,7 @@ extern int drm_bo_pci_offset(struct drm_device *dev, extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem); extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo); -extern int drm_fence_buffer_objects(drm_file_t * priv, +extern int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, drm_fence_object_t * fence, diff --git a/linux-core/drm_pci.c b/linux-core/drm_pci.c index 76252204..a608eed3 100644 --- a/linux-core/drm_pci.c +++ b/linux-core/drm_pci.c @@ -47,7 +47,7 @@ /** * \brief Allocate a PCI consistent memory block, for DMA. */ -drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, +drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, dma_addr_t maxaddr) { drm_dma_handle_t *dmah; @@ -123,7 +123,7 @@ EXPORT_SYMBOL(drm_pci_alloc); * * This function is for internal use in the Linux-specific DRM core code. */ -void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) { unsigned long addr; size_t sz; @@ -167,7 +167,7 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) /** * \brief Free a PCI consistent memory block */ -void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +void drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) { __drm_pci_free(dev, dmah); kfree(dmah); diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index e59f2afa..f33bd93d 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -90,7 +90,7 @@ static struct drm_proc_list { * "/proc/dri/%minor%/", and each entry in proc_list as * "/proc/dri/%minor%/%name%". 
*/ -int drm_proc_init(drm_device_t * dev, int minor, +int drm_proc_init(struct drm_device * dev, int minor, struct proc_dir_entry *root, struct proc_dir_entry **dev_root) { struct proc_dir_entry *ent; @@ -165,7 +165,7 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root, static int drm_name_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; if (offset > DRM_PROC_LIMIT) { @@ -207,7 +207,7 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, static int drm__vm_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; struct drm_map *map; struct drm_map_list *r_list; @@ -264,7 +264,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request, static int drm_vm_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -287,10 +287,10 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request, static int drm__queues_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; int i; - drm_queue_t *q; + struct drm_queue *q; if (offset > DRM_PROC_LIMIT) { *eof = 1; @@ -337,7 +337,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset, static int drm_queues_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -360,9 +360,9 @@ static int 
drm_queues_info(char *buf, char **start, off_t offset, int request, static int drm__bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma || offset > DRM_PROC_LIMIT) { @@ -409,7 +409,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request, static int drm_bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -432,7 +432,7 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request, static int drm__objects_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; drm_buffer_manager_t *bm = &dev->bm; drm_fence_manager_t *fm = &dev->fm; @@ -496,7 +496,7 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request, static int drm_objects_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -519,9 +519,9 @@ static int drm_objects_info(char *buf, char **start, off_t offset, int request, static int drm__clients_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_file_t *priv; + struct drm_file *priv; if (offset > DRM_PROC_LIMIT) { *eof = 1; @@ -552,7 +552,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset, static int 
drm_clients_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -566,9 +566,9 @@ static int drm_clients_info(char *buf, char **start, off_t offset, static int drm__vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_vma_entry_t *pt; + struct drm_vma_entry *pt; struct vm_area_struct *vma; #if defined(__i386__) unsigned int pgprot; @@ -625,7 +625,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request, static int drm_vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index 138ae087..7c13610d 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -190,7 +190,7 @@ EXPORT_SYMBOL(drm_sg_alloc); int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct drm_scatter_gather __user *argp = (void __user *)arg; struct drm_scatter_gather request; int ret; @@ -214,8 +214,8 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, int drm_sg_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_scatter_gather request; struct drm_sg_mem *entry; diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index e15db6d6..8e4bfbd8 100644 
--- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -88,8 +88,8 @@ EXPORT_SYMBOL(drm_sman_init); static void *drm_sman_mm_allocate(void *private, unsigned long size, unsigned alignment) { - drm_mm_t *mm = (drm_mm_t *) private; - drm_mm_node_t *tmp; + struct drm_mm *mm = (struct drm_mm *) private; + struct drm_mm_node *tmp; tmp = drm_mm_search_free(mm, size, alignment, 1); if (!tmp) { @@ -101,21 +101,21 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size, static void drm_sman_mm_free(void *private, void *ref) { - drm_mm_node_t *node = (drm_mm_node_t *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; drm_mm_put_block(node); } static void drm_sman_mm_destroy(void *private) { - drm_mm_t *mm = (drm_mm_t *) private; + struct drm_mm *mm = (struct drm_mm *) private; drm_mm_takedown(mm); drm_free(mm, sizeof(*mm), DRM_MEM_MM); } static unsigned long drm_sman_mm_offset(void *private, void *ref) { - drm_mm_node_t *node = (drm_mm_node_t *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; return node->start; } @@ -124,7 +124,7 @@ drm_sman_set_range(drm_sman_t * sman, unsigned int manager, unsigned long start, unsigned long size) { drm_sman_mm_t *sman_mm; - drm_mm_t *mm; + struct drm_mm *mm; int ret; BUG_ON(manager >= sman->num_managers); diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index b96408ab..eba6deed 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -50,11 +50,11 @@ MODULE_PARM_DESC(debug, "Enable debug output"); module_param_named(cards_limit, drm_cards_limit, int, 0444); module_param_named(debug, drm_debug, int, 0600); -drm_head_t **drm_heads; +struct drm_head **drm_heads; struct drm_sysfs_class *drm_class; struct proc_dir_entry *drm_proc_root; -static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev, +static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { @@ -160,9 +160,9 @@ error_out_unreg: * 
create the proc init entry via proc_init(). This routines assigns * minor numbers to secondary heads of multi-headed cards */ -static int drm_get_head(drm_device_t * dev, drm_head_t * head) +static int drm_get_head(struct drm_device * dev, struct drm_head * head) { - drm_head_t **heads = drm_heads; + struct drm_head **heads = drm_heads; int ret; int minor; @@ -171,7 +171,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head) for (minor = 0; minor < drm_cards_limit; minor++, heads++) { if (!*heads) { - *head = (drm_head_t) { + *head = (struct drm_head) { .dev = dev, .device = MKDEV(DRM_MAJOR, minor), .minor = minor, @@ -202,7 +202,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head) err_g2: drm_proc_cleanup(minor, drm_proc_root, head->dev_root); err_g1: - *head = (drm_head_t) { + *head = (struct drm_head) { .dev = NULL}; return ret; } @@ -221,7 +221,7 @@ err_g1: int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { - drm_device_t *dev; + struct drm_device *dev; int ret; DRM_DEBUG("\n"); @@ -282,7 +282,7 @@ EXPORT_SYMBOL(drm_get_dev); * "drm" data, otherwise unregisters the "drm" data, frees the dev list and * unregisters the character device. */ -int drm_put_dev(drm_device_t * dev) +int drm_put_dev(struct drm_device * dev) { DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); @@ -310,7 +310,7 @@ int drm_put_dev(drm_device_t * dev) * last minor released. 
* */ -int drm_put_head(drm_head_t * head) +int drm_put_head(struct drm_head * head) { int minor = head->minor; @@ -319,7 +319,7 @@ int drm_put_head(drm_head_t * head) drm_proc_cleanup(minor, drm_proc_root, head->dev_root); drm_sysfs_device_remove(head->dev_class); - *head = (drm_head_t){.dev = NULL}; + *head = (struct drm_head){.dev = NULL}; drm_heads[minor] = NULL; return 0; diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index 9b2f5dce..1090e719 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -123,7 +123,7 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs) static ssize_t show_dri(struct class_device *class_device, char *buf) { - drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev; + struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev; if (dev->driver->dri_library_name) return dev->driver->dri_library_name(dev, buf); return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name); @@ -148,7 +148,7 @@ static struct class_device_attribute class_device_attrs[] = { * created with a call to drm_sysfs_create(). 
*/ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - drm_head_t * head) + struct drm_head * head) { struct simple_dev *s_dev = NULL; int i, retval; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 7451adc5..de2fba1a 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -85,11 +85,11 @@ pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map *map = NULL; - drm_map_list_t *r_list; - drm_hash_item_t *hash; + struct drm_map_list *r_list; + struct drm_hash_item *hash; /* * Find the right map @@ -103,7 +103,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) goto vm_nopage_error; - r_list = drm_hash_entry(hash, drm_map_list_t, hash); + r_list = drm_hash_entry(hash, struct drm_map_list, hash); map = r_list->map; if (map && map->type == _DRM_AGP) { @@ -203,11 +203,11 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, */ static void drm_vm_shm_close(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *pt, *temp; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *pt, *temp; struct drm_map *map; - drm_map_list_t *r_list; + struct drm_map_list *r_list; int found_maps = 0; DRM_DEBUG("0x%08lx,0x%08lx\n", @@ -285,9 +285,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = 
priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; unsigned long offset; unsigned long page_nr; struct page *page; @@ -322,9 +322,9 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, unsigned long address) { struct drm_map *map = (struct drm_map *) vma->vm_private_data; - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_sg_mem_t *entry = dev->sg; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_sg_mem *entry = dev->sg; unsigned long offset; unsigned long map_offset; unsigned long page_offset; @@ -418,9 +418,9 @@ static struct vm_operations_struct drm_vm_sg_ops = { */ static void drm_vm_open_locked(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *vma_entry; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *vma_entry; DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); @@ -436,8 +436,8 @@ static void drm_vm_open_locked(struct vm_area_struct *vma) static void drm_vm_open(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; mutex_lock(&dev->struct_mutex); drm_vm_open_locked(vma); @@ -454,9 +454,9 @@ static void drm_vm_open(struct vm_area_struct *vma) */ static void drm_vm_close(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *pt, *temp; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *pt, *temp; 
DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); @@ -486,9 +486,9 @@ static void drm_vm_close(struct vm_area_struct *vma) */ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; - drm_device_dma_t *dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; + struct drm_device_dma *dma; unsigned long length = vma->vm_end - vma->vm_start; dev = priv->head->dev; @@ -555,8 +555,8 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); */ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_map *map = NULL; unsigned long offset = 0; struct drm_hash_item *hash; @@ -585,7 +585,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) return -EINVAL; } - map = drm_hash_entry(hash, drm_map_list_t, hash)->map; + map = drm_hash_entry(hash, struct drm_map_list, hash)->map; if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) return -EPERM; @@ -676,8 +676,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) int drm_mmap(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -717,7 +717,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, unsigned long page_offset; struct page *page = NULL; drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_device *dev; unsigned long pfn; int err; unsigned long bus_base; @@ -816,7 +816,7 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma) static void drm_bo_vm_open(struct vm_area_struct *vma) { drm_buffer_object_t *bo = 
(drm_buffer_object_t *) vma->vm_private_data; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; mutex_lock(&dev->struct_mutex); drm_bo_vm_open_locked(vma); @@ -832,7 +832,7 @@ static void drm_bo_vm_open(struct vm_area_struct *vma) static void drm_bo_vm_close(struct vm_area_struct *vma) { drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_vm_close(vma); if (bo) { diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index a4e0c390..31dc1c86 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -46,9 +46,9 @@ #define I810_BUF_UNMAPPED 0 #define I810_BUF_MAPPED 1 -static inline void i810_print_status_page(drm_device_t * dev) +static inline void i810_print_status_page(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = dev->dev_private; u32 *temp = dev_priv->hw_status_page; int i; @@ -64,9 +64,9 @@ static inline void i810_print_status_page(drm_device_t * dev) } } -static drm_buf_t *i810_freelist_get(drm_device_t * dev) +static drm_buf_t *i810_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; int used; @@ -89,7 +89,7 @@ static drm_buf_t *i810_freelist_get(drm_device_t * dev) * yet, the hardware updates in use for us once its on the ring buffer. 
*/ -static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf) +static int i810_freelist_put(struct drm_device * dev, drm_buf_t * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used; @@ -106,8 +106,8 @@ static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf) static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; drm_i810_private_t *dev_priv; drm_buf_t *buf; drm_i810_buf_priv_t *buf_priv; @@ -141,8 +141,8 @@ static const struct file_operations i810_buffer_fops = { static int i810_map_buffer(drm_buf_t * buf, struct file *filp) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_private_t *dev_priv = dev->dev_private; const struct file_operations *old_fops; @@ -191,7 +191,7 @@ static int i810_unmap_buffer(drm_buf_t * buf) return retcode; } -static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, +static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, struct file *filp) { drm_buf_t *buf; @@ -221,9 +221,9 @@ static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, return retcode; } -static int i810_dma_cleanup(drm_device_t * dev) +static int i810_dma_cleanup(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private @@ -262,7 +262,7 @@ static int i810_dma_cleanup(drm_device_t * dev) return 0; } -static int i810_wait_ring(drm_device_t * dev, int n) +static int i810_wait_ring(struct drm_device * dev, int n) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = 
&(dev_priv->ring); @@ -295,7 +295,7 @@ static int i810_wait_ring(drm_device_t * dev, int n) return iters; } -static void i810_kernel_lost_context(drm_device_t * dev) +static void i810_kernel_lost_context(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = &(dev_priv->ring); @@ -307,9 +307,9 @@ static void i810_kernel_lost_context(drm_device_t * dev) ring->space += ring->Size; } -static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) +static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int my_idx = 24; u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); int i; @@ -342,7 +342,7 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) return 0; } -static int i810_dma_initialize(drm_device_t * dev, +static int i810_dma_initialize(struct drm_device * dev, drm_i810_private_t * dev_priv, drm_i810_init_t * init) { @@ -495,8 +495,8 @@ static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) static int i810_dma_init(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv; drm_i810_init_t init; int retcode = 0; @@ -553,7 +553,7 @@ static int i810_dma_init(struct inode *inode, struct file *filp, * Use 'volatile' & local var tmp to force the emitted values to be * identical to the verified ones. 
*/ -static void i810EmitContextVerified(drm_device_t * dev, +static void i810EmitContextVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -586,7 +586,7 @@ static void i810EmitContextVerified(drm_device_t * dev, ADVANCE_LP_RING(); } -static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code) +static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; @@ -619,7 +619,7 @@ static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code) /* Need to do some additional checking when setting the dest buffer. */ -static void i810EmitDestVerified(drm_device_t * dev, +static void i810EmitDestVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -654,7 +654,7 @@ static void i810EmitDestVerified(drm_device_t * dev, ADVANCE_LP_RING(); } -static void i810EmitState(drm_device_t * dev) +static void i810EmitState(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -685,7 +685,7 @@ static void i810EmitState(drm_device_t * dev) /* need to verify */ -static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, +static void i810_dma_dispatch_clear(struct drm_device * dev, int flags, unsigned int clear_color, unsigned int clear_zval) { @@ -760,7 +760,7 @@ static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, } } -static void i810_dma_dispatch_swap(drm_device_t * dev) +static void i810_dma_dispatch_swap(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -806,7 +806,7 @@ static void i810_dma_dispatch_swap(drm_device_t * dev) } } -static void i810_dma_dispatch_vertex(drm_device_t * dev, +static void i810_dma_dispatch_vertex(struct drm_device * dev, drm_buf_t 
* buf, int discard, int used) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -886,7 +886,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev, } } -static void i810_dma_dispatch_flip(drm_device_t * dev) +static void i810_dma_dispatch_flip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; int pitch = dev_priv->pitch; @@ -933,7 +933,7 @@ static void i810_dma_dispatch_flip(drm_device_t * dev) } -static void i810_dma_quiescent(drm_device_t * dev) +static void i810_dma_quiescent(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -952,10 +952,10 @@ static void i810_dma_quiescent(drm_device_t * dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); } -static int i810_flush_queue(drm_device_t * dev) +static int i810_flush_queue(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i, ret = 0; RING_LOCALS; @@ -987,9 +987,9 @@ static int i810_flush_queue(drm_device_t * dev) } /* Must be called with the lock held */ -static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) +static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma) @@ -1020,8 +1020,8 @@ static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) static int i810_flush_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1032,9 +1032,9 @@ static int i810_flush_ioctl(struct inode *inode, struct file *filp, static int i810_dma_vertex(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - 
drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1068,8 +1068,8 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp, static int i810_clear_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_clear_t clear; if (copy_from_user @@ -1091,8 +1091,8 @@ static int i810_clear_bufs(struct inode *inode, struct file *filp, static int i810_swap_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; DRM_DEBUG("i810_swap_bufs\n"); @@ -1105,8 +1105,8 @@ static int i810_swap_bufs(struct inode *inode, struct file *filp, static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1119,8 +1119,8 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct 
drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int retcode = 0; drm_i810_dma_t d; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; @@ -1161,7 +1161,7 @@ static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } -static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used, +static void i810_dma_dispatch_mc(struct drm_device * dev, drm_buf_t * buf, int used, unsigned int last_render) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1224,9 +1224,9 @@ static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used, static int i810_dma_mc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1255,8 +1255,8 @@ static int i810_dma_mc(struct inode *inode, struct file *filp, static int i810_rstatus(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; return (int)(((u32 *) (dev_priv->hw_status_page))[4]); @@ -1265,8 +1265,8 @@ static int i810_rstatus(struct inode *inode, struct file *filp, static int i810_ov0_info(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = 
priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; drm_i810_overlay_t data; @@ -1281,8 +1281,8 @@ static int i810_ov0_info(struct inode *inode, struct file *filp, static int i810_fstatus(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1292,8 +1292,8 @@ static int i810_fstatus(struct inode *inode, struct file *filp, static int i810_ov0_flip(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, filp); @@ -1305,7 +1305,7 @@ static int i810_ov0_flip(struct inode *inode, struct file *filp, /* Not sure why this isn't set all the time: */ -static void i810_do_init_pageflip(drm_device_t * dev) +static void i810_do_init_pageflip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1315,7 +1315,7 @@ static void i810_do_init_pageflip(drm_device_t * dev) dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; } -static int i810_do_cleanup_pageflip(drm_device_t * dev) +static int i810_do_cleanup_pageflip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1330,8 +1330,8 @@ static int i810_do_cleanup_pageflip(drm_device_t * dev) static int i810_flip_bufs(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = 
priv->head->dev; drm_i810_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1345,7 +1345,7 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp, return 0; } -int i810_driver_load(drm_device_t *dev, unsigned long flags) +int i810_driver_load(struct drm_device *dev, unsigned long flags) { /* i810 has 4 more counters */ dev->counters += 4; @@ -1357,12 +1357,12 @@ int i810_driver_load(drm_device_t *dev, unsigned long flags) return 0; } -void i810_driver_lastclose(drm_device_t * dev) +void i810_driver_lastclose(struct drm_device * dev) { i810_dma_cleanup(dev); } -void i810_driver_preclose(drm_device_t * dev, DRMFILE filp) +void i810_driver_preclose(struct drm_device * dev, DRMFILE filp) { if (dev->dev_private) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1372,18 +1372,18 @@ void i810_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) { i810_reclaim_buffers(dev, filp); } -int i810_driver_dma_quiescent(drm_device_t * dev) +int i810_driver_dma_quiescent(struct drm_device * dev) { i810_dma_quiescent(dev); return 0; } -drm_ioctl_desc_t i810_ioctls[] = { +struct drm_ioctl_desc i810_ioctls[] = { [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, @@ -1414,7 +1414,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); * \returns * A value of 1 is always retured to indictate every i810 is AGP. 
*/ -int i810_driver_device_is_agp(drm_device_t * dev) +int i810_driver_device_is_agp(struct drm_device * dev) { return 1; } diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index dbe9d708..06eac774 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -115,17 +115,17 @@ typedef struct drm_i810_private { } drm_i810_private_t; /* i810_dma.c */ -extern int i810_driver_dma_quiescent(drm_device_t * dev); -extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, +extern int i810_driver_dma_quiescent(struct drm_device * dev); +extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp); extern int i810_driver_load(struct drm_device *, unsigned long flags); -extern void i810_driver_lastclose(drm_device_t * dev); -extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp); -extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, +extern void i810_driver_lastclose(struct drm_device * dev); +extern void i810_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp); -extern int i810_driver_device_is_agp(drm_device_t * dev); +extern int i810_driver_device_is_agp(struct drm_device * dev); -extern drm_ioctl_desc_t i810_ioctls[]; +extern struct drm_ioctl_desc i810_ioctls[]; extern int i810_max_ioctl; #define I810_BASE(reg) ((unsigned long) \ diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 2850fb94..6aeccfcb 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -33,7 +33,7 @@ #include "i915_drm.h" #include "i915_drv.h" -drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev) +drm_ttm_backend_t *i915_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } @@ -47,7 +47,7 @@ int i915_fence_types(drm_buffer_object_t *bo, uint32_t * type) return 0; } -int i915_invalidate_caches(drm_device_t * dev, uint64_t flags) +int 
i915_invalidate_caches(struct drm_device * dev, uint64_t flags) { /* * FIXME: Only emit once per batchbuffer submission. @@ -63,7 +63,7 @@ int i915_invalidate_caches(drm_device_t * dev, uint64_t flags) return i915_emit_mi_flush(dev, flush_cmd); } -int i915_init_mem_type(drm_device_t * dev, uint32_t type, +int i915_init_mem_type(struct drm_device * dev, uint32_t type, drm_mem_type_manager_t * man) { switch (type) { @@ -116,7 +116,7 @@ uint32_t i915_evict_mask(drm_buffer_object_t *bo) } } -static void i915_emit_copy_blit(drm_device_t * dev, +static void i915_emit_copy_blit(struct drm_device * dev, uint32_t src_offset, uint32_t dst_offset, uint32_t pages, int direction) @@ -183,7 +183,7 @@ static int i915_move_blit(drm_buffer_object_t * bo, static int i915_move_flip(drm_buffer_object_t * bo, int evict, int no_wait, drm_bo_mem_reg_t * new_mem) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; drm_bo_mem_reg_t tmp_mem; int ret; diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 00873485..a71e5dac 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -38,7 +38,7 @@ * Implements an intel sync flush operation. 
*/ -static void i915_perform_flush(drm_device_t * dev) +static void i915_perform_flush(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_fence_manager_t *fm = &dev->fm; @@ -109,7 +109,7 @@ static void i915_perform_flush(drm_device_t * dev) } -void i915_poke_flush(drm_device_t * dev, uint32_t class) +void i915_poke_flush(struct drm_device * dev, uint32_t class) { drm_fence_manager_t *fm = &dev->fm; unsigned long flags; @@ -119,7 +119,7 @@ void i915_poke_flush(drm_device_t * dev, uint32_t class) write_unlock_irqrestore(&fm->lock, flags); } -int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, +int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -135,7 +135,7 @@ int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, return 0; } -void i915_fence_handler(drm_device_t * dev) +void i915_fence_handler(struct drm_device * dev) { drm_fence_manager_t *fm = &dev->fm; @@ -144,7 +144,7 @@ void i915_fence_handler(drm_device_t * dev) write_unlock(&fm->lock); } -int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags) +int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) { /* * We have an irq that tells us when we have a new breadcrumb. diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c index ef6f1e44..1eb6d9e6 100644 --- a/linux-core/mga_drv.c +++ b/linux-core/mga_drv.c @@ -36,7 +36,7 @@ #include "drm_pciids.h" -static int mga_driver_device_is_agp(drm_device_t * dev); +static int mga_driver_device_is_agp(struct drm_device * dev); static struct pci_device_id pciidlist[] = { mga_PCI_IDS @@ -127,7 +127,7 @@ MODULE_LICENSE("GPL and additional rights"); * \returns * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. 
*/ -static int mga_driver_device_is_agp(drm_device_t * dev) +static int mga_driver_device_is_agp(struct drm_device * dev) { const struct pci_dev * const pdev = dev->pdev; diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c index ac030d89..6c73b0d3 100644 --- a/linux-core/nouveau_drv.c +++ b/linux-core/nouveau_drv.c @@ -32,7 +32,7 @@ static struct pci_device_id pciidlist[] = { nouveau_PCI_IDS }; -extern drm_ioctl_desc_t nouveau_ioctls[]; +extern struct drm_ioctl_desc nouveau_ioctls[]; extern int nouveau_max_ioctl; static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c index 114ec8f9..b4c3f93b 100644 --- a/linux-core/sis_drv.c +++ b/linux-core/sis_drv.c @@ -36,7 +36,7 @@ static struct pci_device_id pciidlist[] = { }; -static int sis_driver_load(drm_device_t *dev, unsigned long chipset) +static int sis_driver_load(struct drm_device *dev, unsigned long chipset) { drm_sis_private_t *dev_priv; int ret; @@ -55,7 +55,7 @@ static int sis_driver_load(drm_device_t *dev, unsigned long chipset) return ret; } -static int sis_driver_unload(drm_device_t *dev) +static int sis_driver_unload(struct drm_device *dev) { drm_sis_private_t *dev_priv = dev->dev_private; diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 21c1f2d7..306ed453 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -122,7 +122,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) return 0; } -static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv, +static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, unsigned long data, int pool) { drm_sis_private_t *dev_priv = dev->dev_private; @@ -228,9 +228,9 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) return sis_drm_alloc(dev, priv, data, AGP_TYPE); } -static drm_local_map_t *sis_reg_init(drm_device_t *dev) +static drm_local_map_t *sis_reg_init(struct drm_device *dev) { - drm_map_list_t *entry; + struct drm_map_list *entry; drm_local_map_t *map; 
list_for_each_entry(entry, &dev->maplist, head) { @@ -245,7 +245,7 @@ static drm_local_map_t *sis_reg_init(drm_device_t *dev) } int -sis_idle(drm_device_t *dev) +sis_idle(struct drm_device *dev) { drm_sis_private_t *dev_priv = dev->dev_private; uint32_t idle_reg; @@ -314,10 +314,10 @@ void sis_lastclose(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) { drm_sis_private_t *dev_priv = dev->dev_private; - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { @@ -334,7 +334,7 @@ void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) return; } -drm_ioctl_desc_t sis_ioctls[] = { +struct drm_ioctl_desc sis_ioctls[] = { [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH}, [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index 86883998..e452611d 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -32,7 +32,7 @@ #include "via_drm.h" #include "via_drv.h" -drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev) +drm_ttm_backend_t *via_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } @@ -43,7 +43,7 @@ int via_fence_types(drm_buffer_object_t *bo, uint32_t * type) return 0; } -int via_invalidate_caches(drm_device_t * dev, uint64_t flags) +int via_invalidate_caches(struct drm_device * dev, uint64_t flags) { /* * FIXME: Invalidate texture caches here. 
@@ -53,7 +53,7 @@ int via_invalidate_caches(drm_device_t * dev, uint64_t flags) } -static int via_vram_info(drm_device_t *dev, +static int via_vram_info(struct drm_device *dev, unsigned long *offset, unsigned long *size) { @@ -81,7 +81,7 @@ static int via_vram_info(drm_device_t *dev, return 0; } -int via_init_mem_type(drm_device_t * dev, uint32_t type, +int via_init_mem_type(struct drm_device * dev, uint32_t type, drm_mem_type_manager_t * man) { switch (type) { diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 2f508374..5108c867 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -206,7 +206,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) */ static void -via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine) +via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) } static void -via_abort_dmablit(drm_device_t *dev, int engine) +via_abort_dmablit(struct drm_device *dev, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -294,7 +294,7 @@ via_abort_dmablit(drm_device_t *dev, int engine) } static void -via_dmablit_engine_off(drm_device_t *dev, int engine) +via_dmablit_engine_off(struct drm_device *dev, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -311,7 +311,7 @@ via_dmablit_engine_off(drm_device_t *dev, int engine) */ void -via_dmablit_handler(drm_device_t *dev, int engine, int from_irq) +via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; @@ -432,7 +432,7 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que */ static int -via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) 
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -465,7 +465,7 @@ static void via_dmablit_timer(unsigned long data) { drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; - drm_device_t *dev = blitq->dev; + struct drm_device *dev = blitq->dev; int engine = (int) (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); @@ -509,7 +509,7 @@ via_dmablit_workqueue(struct work_struct *work) #else drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); #endif - drm_device_t *dev = blitq->dev; + struct drm_device *dev = blitq->dev; unsigned long irqsave; drm_via_sg_info_t *cur_sg; int cur_released; @@ -552,7 +552,7 @@ via_dmablit_workqueue(struct work_struct *work) void -via_init_dmablit(drm_device_t *dev) +via_init_dmablit(struct drm_device *dev) { int i,j; drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -594,7 +594,7 @@ via_init_dmablit(drm_device_t *dev) static int -via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) +via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) { int draw = xfer->to_fb; int ret = 0; @@ -740,7 +740,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq) static int -via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer) +via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_sg_info_t *vsg; diff --git a/linux-core/via_dmablit.h b/linux-core/via_dmablit.h index f6ae03ec..726ad25d 100644 --- a/linux-core/via_dmablit.h +++ b/linux-core/via_dmablit.h @@ -59,7 +59,7 @@ typedef struct _drm_via_sg_info { } drm_via_sg_info_t; typedef struct _drm_via_blitq { - drm_device_t *dev; + struct drm_device *dev; uint32_t cur_blit_handle; uint32_t done_blit_handle; unsigned serviced; diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index 02249939..ce4366d2 100644 --- 
a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -39,7 +39,7 @@ */ -static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class) +static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_fence_class_manager_t *fc = &dev->fm.class[class]; @@ -113,7 +113,7 @@ static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class) * Emit a fence sequence. */ -int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, +int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -152,7 +152,7 @@ int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, * Manual poll (from the fence manager). */ -void via_poke_flush(drm_device_t * dev, uint32_t class) +void via_poke_flush(struct drm_device * dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_fence_manager_t *fm = &dev->fm; @@ -200,7 +200,7 @@ int via_fence_has_irq(struct drm_device * dev, uint32_t class, void via_fence_timer(unsigned long data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; drm_fence_manager_t *fm = &dev->fm; uint32_t pending_flush; diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index d97269f5..48f5fd09 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -190,10 +190,10 @@ int via_mem_free(DRM_IOCTL_ARGS) } -void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) { drm_via_private_t *dev_priv = dev->dev_private; - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); if 
(drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { -- cgit v1.2.3 From 6dce9e07352e14d2e03d26b8a64a40e111ecab2b Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 12:48:44 +1000 Subject: drm: remove hashtab/sman and object typedefs --- linux-core/drmP.h | 2 +- linux-core/drm_bo.c | 12 +++++----- linux-core/drm_bufs.c | 2 +- linux-core/drm_fence.c | 8 +++---- linux-core/drm_fops.c | 8 +++---- linux-core/drm_hashtab.c | 34 +++++++++++++------------- linux-core/drm_hashtab.h | 24 +++++++++---------- linux-core/drm_object.c | 62 ++++++++++++++++++++++++------------------------ linux-core/drm_objects.h | 42 ++++++++++++++++---------------- linux-core/drm_sman.c | 56 +++++++++++++++++++++---------------------- linux-core/drm_sman.h | 50 +++++++++++++++++++------------------- linux-core/sis_mm.c | 4 ++-- linux-core/via_mm.c | 2 +- 13 files changed, 153 insertions(+), 153 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index df7481fe..87a194af 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -418,7 +418,7 @@ struct drm_file { struct list_head refd_objects; struct list_head user_objects; - drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES]; + struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; void *driver_priv; }; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 10d928ea..30664632 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -505,7 +505,7 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) } } -static void drm_bo_base_deref_locked(struct drm_file * priv, drm_user_object_t * uo) +static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_object * uo) { drm_buffer_object_t *bo = drm_user_object_entry(uo, drm_buffer_object_t, base); @@ -924,7 +924,7 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo, drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, uint32_t handle, int check_owner) { - drm_user_object_t *uo; + 
struct drm_user_object *uo; drm_buffer_object_t *bo; uo = drm_lookup_user_object(priv, handle); @@ -1187,7 +1187,7 @@ static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) { struct drm_device *dev = priv->head->dev; drm_buffer_object_t *bo; - drm_ref_object_t *ro; + struct drm_ref_object *ro; int ret = 0; mutex_lock(&dev->struct_mutex); @@ -1216,7 +1216,7 @@ static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) */ static void drm_buffer_user_object_unmap(struct drm_file * priv, - drm_user_object_t * uo, + struct drm_user_object * uo, drm_ref_t action) { drm_buffer_object_t *bo = @@ -1811,7 +1811,7 @@ int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_bo_handle_arg arg; - drm_user_object_t *uo; + struct drm_user_object *uo; int ret = 0; if (!dev->bm.initialized) { @@ -1879,7 +1879,7 @@ int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) struct drm_bo_reference_info_arg arg; struct drm_bo_handle_arg *req = &arg.d.req; struct drm_bo_info_rep *rep = &arg.d.rep; - drm_user_object_t *uo; + struct drm_user_object *uo; int ret; if (!dev->bm.initialized) { diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index 75eeafdd..f766597b 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -63,7 +63,7 @@ struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map } EXPORT_SYMBOL(drm_find_matching_map); -static int drm_map_handle(struct drm_device *dev, drm_hash_item_t *hash, +static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, unsigned long user_token, int hashed_handle) { int use_hashed_handle; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 4f24b4b5..f925621a 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -180,7 +180,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, } -static void drm_fence_object_destroy(struct drm_file *priv, drm_user_object_t * base) +static void drm_fence_object_destroy(struct 
drm_file *priv, struct drm_user_object * base) { drm_fence_object_t *fence = drm_user_object_entry(base, drm_fence_object_t, base); @@ -551,7 +551,7 @@ void drm_fence_manager_takedown(struct drm_device * dev) drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) { struct drm_device *dev = priv->head->dev; - drm_user_object_t *uo; + struct drm_user_object *uo; drm_fence_object_t *fence; mutex_lock(&dev->struct_mutex); @@ -619,7 +619,7 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) int ret; drm_fence_manager_t *fm = &dev->fm; struct drm_fence_arg arg; - drm_user_object_t *uo; + struct drm_user_object *uo; ret = 0; if (!fm->initialized) { @@ -648,7 +648,7 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) drm_fence_manager_t *fm = &dev->fm; struct drm_fence_arg arg; drm_fence_object_t *fence; - drm_user_object_t *uo; + struct drm_user_object *uo; unsigned long flags; ret = 0; diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 5ea3f9cf..98e581fe 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -337,8 +337,8 @@ static void drm_object_release(struct file *filp) { struct drm_file *priv = filp->private_data; struct list_head *head; - drm_user_object_t *user_object; - drm_ref_object_t *ref_object; + struct drm_user_object *user_object; + struct drm_ref_object *ref_object; int i; /* @@ -351,7 +351,7 @@ static void drm_object_release(struct file *filp) { head = &priv->refd_objects; while (head->next != head) { - ref_object = list_entry(head->next, drm_ref_object_t, list); + ref_object = list_entry(head->next, struct drm_ref_object, list); drm_remove_ref_object(priv, ref_object); head = &priv->refd_objects; } @@ -362,7 +362,7 @@ static void drm_object_release(struct file *filp) { head = &priv->user_objects; while (head->next != head) { - user_object = list_entry(head->next, drm_user_object_t, list); + user_object = list_entry(head->next, struct drm_user_object, list); drm_remove_user_object(priv, user_object); head = 
&priv->user_objects; } diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c index 6f17e114..a8ec8468 100644 --- a/linux-core/drm_hashtab.c +++ b/linux-core/drm_hashtab.c @@ -36,7 +36,7 @@ #include "drm_hashtab.h" #include -int drm_ht_create(drm_open_hash_t * ht, unsigned int order) +int drm_ht_create(struct drm_open_hash * ht, unsigned int order) { unsigned int i; @@ -63,9 +63,9 @@ int drm_ht_create(drm_open_hash_t * ht, unsigned int order) return 0; } -void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key) +void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list; unsigned int hashed_key; @@ -75,15 +75,15 @@ void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key) DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); h_list = &ht->table[hashed_key]; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); } } -static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, +static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht, unsigned long key) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list; unsigned int hashed_key; @@ -91,7 +91,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, hashed_key = hash_long(key, ht->order); h_list = &ht->table[hashed_key]; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return list; if (entry->key > key) @@ -100,9 +100,9 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, return NULL; } -int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) +int drm_ht_insert_item(struct drm_open_hash * ht, 
struct drm_hash_item * item) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list, *parent; unsigned int hashed_key; @@ -112,7 +112,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) h_list = &ht->table[hashed_key]; parent = NULL; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return -EINVAL; if (entry->key > key) @@ -131,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) * Just insert an item and return any "bits" bit key that hasn't been * used before. */ -int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item, +int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item, unsigned long seed, int bits, int shift, unsigned long add) { @@ -155,8 +155,8 @@ int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item, return 0; } -int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key, - drm_hash_item_t ** item) +int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key, + struct drm_hash_item ** item) { struct hlist_node *list; @@ -164,11 +164,11 @@ int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key, if (!list) return -EINVAL; - *item = hlist_entry(list, drm_hash_item_t, head); + *item = hlist_entry(list, struct drm_hash_item, head); return 0; } -int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key) +int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key) { struct hlist_node *list; @@ -181,14 +181,14 @@ int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key) return -EINVAL; } -int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item) +int drm_ht_remove_item(struct drm_open_hash * ht, struct drm_hash_item * item) { hlist_del_init(&item->head); ht->fill--; return 0; } -void drm_ht_remove(drm_open_hash_t * ht) +void 
drm_ht_remove(struct drm_open_hash * ht) { if (ht->table) { if (ht->use_vmalloc) diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h index 613091c9..0f137677 100644 --- a/linux-core/drm_hashtab.h +++ b/linux-core/drm_hashtab.h @@ -37,31 +37,31 @@ #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -typedef struct drm_hash_item{ +struct drm_hash_item { struct hlist_node head; unsigned long key; -} drm_hash_item_t; +}; -typedef struct drm_open_hash{ +struct drm_open_hash { unsigned int size; unsigned int order; unsigned int fill; struct hlist_head *table; int use_vmalloc; -} drm_open_hash_t; +}; -extern int drm_ht_create(drm_open_hash_t *ht, unsigned int order); -extern int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item); -extern int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item, +extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order); +extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, unsigned long seed, int bits, int shift, unsigned long add); -extern int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key, drm_hash_item_t **item); +extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); -extern void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key); -extern int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key); -extern int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item); -extern void drm_ht_remove(drm_open_hash_t *ht); +extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern void drm_ht_remove(struct drm_open_hash *ht); #endif diff --git a/linux-core/drm_object.c 
b/linux-core/drm_object.c index 3c60605c..00627725 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -30,7 +30,7 @@ #include "drmP.h" -int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, +int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, int shareable) { struct drm_device *dev = priv->head->dev; @@ -51,12 +51,12 @@ int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, return 0; } -drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key) +struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key) { struct drm_device *dev = priv->head->dev; - drm_hash_item_t *hash; + struct drm_hash_item *hash; int ret; - drm_user_object_t *item; + struct drm_user_object *item; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -64,10 +64,10 @@ drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key) if (ret) { return NULL; } - item = drm_hash_entry(hash, drm_user_object_t, hash); + item = drm_hash_entry(hash, struct drm_user_object, hash); if (priv != item->owner) { - drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE]; + struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE]; ret = drm_ht_find_item(ht, (unsigned long)item, &hash); if (ret) { DRM_ERROR("Object not registered for usage\n"); @@ -77,7 +77,7 @@ drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, uint32_t key) return item; } -static void drm_deref_user_object(struct drm_file * priv, drm_user_object_t * item) +static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item) { struct drm_device *dev = priv->head->dev; int ret; @@ -90,7 +90,7 @@ static void drm_deref_user_object(struct drm_file * priv, drm_user_object_t * it } } -int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item) +int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item) { 
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -105,7 +105,7 @@ int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item) return 0; } -static int drm_object_ref_action(struct drm_file * priv, drm_user_object_t * ro, +static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, drm_ref_t action) { int ret = 0; @@ -124,12 +124,12 @@ static int drm_object_ref_action(struct drm_file * priv, drm_user_object_t * ro, return ret; } -int drm_add_ref_object(struct drm_file * priv, drm_user_object_t * referenced_object, +int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, drm_ref_t ref_action) { int ret = 0; - drm_ref_object_t *item; - drm_open_hash_t *ht = &priv->refd_object_hash[ref_action]; + struct drm_ref_object *item; + struct drm_open_hash *ht = &priv->refd_object_hash[ref_action]; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); if (!referenced_object->shareable && priv != referenced_object->owner) { @@ -181,11 +181,11 @@ int drm_add_ref_object(struct drm_file * priv, drm_user_object_t * referenced_ob return ret; } -drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, - drm_user_object_t * referenced_object, +struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, drm_ref_t ref_action) { - drm_hash_item_t *hash; + struct drm_hash_item *hash; int ret; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -194,31 +194,31 @@ drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, if (ret) return NULL; - return drm_hash_entry(hash, drm_ref_object_t, hash); + return drm_hash_entry(hash, struct drm_ref_object, hash); } static void drm_remove_other_references(struct drm_file * priv, - drm_user_object_t * ro) + struct drm_user_object * ro) { int i; - drm_open_hash_t *ht; - drm_hash_item_t *hash; - drm_ref_object_t *item; + struct drm_open_hash *ht; + struct drm_hash_item *hash; + struct drm_ref_object *item; 
for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) { ht = &priv->refd_object_hash[i]; while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) { - item = drm_hash_entry(hash, drm_ref_object_t, hash); + item = drm_hash_entry(hash, struct drm_ref_object, hash); drm_remove_ref_object(priv, item); } } } -void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item) +void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item) { int ret; - drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key; - drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action]; + struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; + struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; drm_ref_t unref_action; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -245,11 +245,11 @@ void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item) } int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, - drm_object_type_t type, drm_user_object_t ** object) + enum drm_object_type type, struct drm_user_object ** object) { struct drm_device *dev = priv->head->dev; - drm_user_object_t *uo; - drm_hash_item_t *hash; + struct drm_user_object *uo; + struct drm_hash_item *hash; int ret; mutex_lock(&dev->struct_mutex); @@ -258,7 +258,7 @@ int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, DRM_ERROR("Could not find user object to reference.\n"); goto out_err; } - uo = drm_hash_entry(hash, drm_user_object_t, hash); + uo = drm_hash_entry(hash, struct drm_user_object, hash); if (uo->type != type) { ret = -EINVAL; goto out_err; @@ -275,11 +275,11 @@ int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, } int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, - drm_object_type_t type) + enum drm_object_type type) { struct drm_device *dev = priv->head->dev; - drm_user_object_t *uo; - drm_ref_object_t *ro; + struct drm_user_object *uo; 
+ struct drm_ref_object *ro; int ret; mutex_lock(&dev->struct_mutex); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index cfca5bf0..c4428a7b 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -39,14 +39,14 @@ struct drm_device; #define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -typedef enum { +enum drm_object_type { drm_fence_type, drm_buffer_type, drm_ttm_type /* * Add other user space object types here. */ -} drm_object_type_t; +}; /* * A user object is a structure that helps the drm give out user handles @@ -55,10 +55,10 @@ typedef enum { * Designed to be accessible using a user space 32-bit handle. */ -typedef struct drm_user_object { - drm_hash_item_t hash; +struct drm_user_object { + struct drm_hash_item hash; struct list_head list; - drm_object_type_t type; + enum drm_object_type type; atomic_t refcount; int shareable; struct drm_file *owner; @@ -68,7 +68,7 @@ typedef struct drm_user_object { void (*unref) (struct drm_file * priv, struct drm_user_object * obj, drm_ref_t unref_action); void (*remove) (struct drm_file * priv, struct drm_user_object * obj); -} drm_user_object_t; +}; /* * A ref object is a structure which is used to @@ -77,24 +77,24 @@ typedef struct drm_user_object { * process exits. Designed to be accessible using a pointer to the _user_ object. */ -typedef struct drm_ref_object { - drm_hash_item_t hash; +struct drm_ref_object { + struct drm_hash_item hash; struct list_head list; atomic_t refcount; drm_ref_t unref_action; -} drm_ref_object_t; +}; /** * Must be called with the struct_mutex held. */ -extern int drm_add_user_object(struct drm_file * priv, drm_user_object_t * item, +extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, int shareable); /** * Must be called with the struct_mutex held. 
*/ -extern drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, +extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key); /* @@ -104,22 +104,22 @@ extern drm_user_object_t *drm_lookup_user_object(struct drm_file * priv, * This function may temporarily release the struct_mutex. */ -extern int drm_remove_user_object(struct drm_file * priv, drm_user_object_t * item); +extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item); /* * Must be called with the struct_mutex held. May temporarily release it. */ extern int drm_add_ref_object(struct drm_file * priv, - drm_user_object_t * referenced_object, + struct drm_user_object * referenced_object, drm_ref_t ref_action); /* * Must be called with the struct_mutex held. */ -drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, - drm_user_object_t * referenced_object, +struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, drm_ref_t ref_action); /* * Must be called with the struct_mutex held. @@ -128,19 +128,19 @@ drm_ref_object_t *drm_lookup_ref_object(struct drm_file * priv, * This function may temporarily release the struct_mutex. */ -extern void drm_remove_ref_object(struct drm_file * priv, drm_ref_object_t * item); +extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item); extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, - drm_object_type_t type, - drm_user_object_t ** object); + enum drm_object_type type, + struct drm_user_object ** object); extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, - drm_object_type_t type); + enum drm_object_type type); /*************************************************** * Fence objects. 
(drm_fence.c) */ typedef struct drm_fence_object { - drm_user_object_t base; + struct drm_user_object base; struct drm_device *dev; atomic_t usage; @@ -328,7 +328,7 @@ typedef struct drm_bo_mem_reg { typedef struct drm_buffer_object { struct drm_device *dev; - drm_user_object_t base; + struct drm_user_object base; /* * If there is a possibility that the usage variable is zero, diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index 8e4bfbd8..ece80bed 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -39,12 +39,12 @@ #include "drm_sman.h" typedef struct drm_owner_item { - drm_hash_item_t owner_hash; + struct drm_hash_item owner_hash; struct list_head sman_list; struct list_head mem_blocks; } drm_owner_item_t; -void drm_sman_takedown(drm_sman_t * sman) +void drm_sman_takedown(struct drm_sman * sman) { drm_ht_remove(&sman->user_hash_tab); drm_ht_remove(&sman->owner_hash_tab); @@ -56,12 +56,12 @@ void drm_sman_takedown(drm_sman_t * sman) EXPORT_SYMBOL(drm_sman_takedown); int -drm_sman_init(drm_sman_t * sman, unsigned int num_managers, +drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order) { int ret = 0; - sman->mm = (drm_sman_mm_t *) drm_calloc(num_managers, sizeof(*sman->mm), + sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), DRM_MEM_MM); if (!sman->mm) { ret = -ENOMEM; @@ -120,10 +120,10 @@ static unsigned long drm_sman_mm_offset(void *private, void *ref) } int -drm_sman_set_range(drm_sman_t * sman, unsigned int manager, +drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size) { - drm_sman_mm_t *sman_mm; + struct drm_sman_mm *sman_mm; struct drm_mm *mm; int ret; @@ -153,8 +153,8 @@ drm_sman_set_range(drm_sman_t * sman, unsigned int manager, EXPORT_SYMBOL(drm_sman_set_range); int -drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, - drm_sman_mm_t * allocator) +drm_sman_set_manager(struct 
drm_sman * sman, unsigned int manager, + struct drm_sman_mm * allocator) { BUG_ON(manager >= sman->num_managers); sman->mm[manager] = *allocator; @@ -163,11 +163,11 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, } EXPORT_SYMBOL(drm_sman_set_manager); -static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman, +static drm_owner_item_t *drm_sman_get_owner_item(struct drm_sman * sman, unsigned long owner) { int ret; - drm_hash_item_t *owner_hash_item; + struct drm_hash_item *owner_hash_item; drm_owner_item_t *owner_item; ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item); @@ -194,14 +194,14 @@ out: return NULL; } -drm_memblock_item_t *drm_sman_alloc(drm_sman_t *sman, unsigned int manager, +struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager, unsigned long size, unsigned alignment, unsigned long owner) { void *tmp; - drm_sman_mm_t *sman_mm; + struct drm_sman_mm *sman_mm; drm_owner_item_t *owner_item; - drm_memblock_item_t *memblock; + struct drm_memblock_item *memblock; BUG_ON(manager >= sman->num_managers); @@ -246,9 +246,9 @@ out: EXPORT_SYMBOL(drm_sman_alloc); -static void drm_sman_free(drm_memblock_item_t *item) +static void drm_sman_free(struct drm_memblock_item *item) { - drm_sman_t *sman = item->sman; + struct drm_sman *sman = item->sman; list_del(&item->owner_list); drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); @@ -256,22 +256,22 @@ static void drm_sman_free(drm_memblock_item_t *item) drm_free(item, sizeof(*item), DRM_MEM_MM); } -int drm_sman_free_key(drm_sman_t *sman, unsigned int key) +int drm_sman_free_key(struct drm_sman *sman, unsigned int key) { - drm_hash_item_t *hash_item; - drm_memblock_item_t *memblock_item; + struct drm_hash_item *hash_item; + struct drm_memblock_item *memblock_item; if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item)) return -EINVAL; - memblock_item = drm_hash_entry(hash_item, drm_memblock_item_t, user_hash); + memblock_item = 
drm_hash_entry(hash_item, struct drm_memblock_item, user_hash); drm_sman_free(memblock_item); return 0; } EXPORT_SYMBOL(drm_sman_free_key); -static void drm_sman_remove_owner(drm_sman_t *sman, +static void drm_sman_remove_owner(struct drm_sman *sman, drm_owner_item_t *owner_item) { list_del(&owner_item->sman_list); @@ -279,10 +279,10 @@ static void drm_sman_remove_owner(drm_sman_t *sman, drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); } -int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner) +int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) { - drm_hash_item_t *hash_item; + struct drm_hash_item *hash_item; drm_owner_item_t *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { @@ -300,10 +300,10 @@ int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner) EXPORT_SYMBOL(drm_sman_owner_clean); -static void drm_sman_do_owner_cleanup(drm_sman_t *sman, +static void drm_sman_do_owner_cleanup(struct drm_sman *sman, drm_owner_item_t *owner_item) { - drm_memblock_item_t *entry, *next; + struct drm_memblock_item *entry, *next; list_for_each_entry_safe(entry, next, &owner_item->mem_blocks, owner_list) { @@ -312,10 +312,10 @@ static void drm_sman_do_owner_cleanup(drm_sman_t *sman, drm_sman_remove_owner(sman, owner_item); } -void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner) +void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner) { - drm_hash_item_t *hash_item; + struct drm_hash_item *hash_item; drm_owner_item_t *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { @@ -329,11 +329,11 @@ void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner) EXPORT_SYMBOL(drm_sman_owner_cleanup); -void drm_sman_cleanup(drm_sman_t *sman) +void drm_sman_cleanup(struct drm_sman *sman) { drm_owner_item_t *entry, *next; unsigned int i; - drm_sman_mm_t *sman_mm; + struct drm_sman_mm *sman_mm; list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) { 
drm_sman_do_owner_cleanup(sman, entry); diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h index ddc732a1..39a39fef 100644 --- a/linux-core/drm_sman.h +++ b/linux-core/drm_sman.h @@ -50,7 +50,7 @@ * for memory management. */ -typedef struct drm_sman_mm { +struct drm_sman_mm { /* private info. If allocated, needs to be destroyed by the destroy function */ void *private; @@ -74,30 +74,30 @@ typedef struct drm_sman_mm { "alloc" function */ unsigned long (*offset) (void *private, void *ref); -} drm_sman_mm_t; +}; -typedef struct drm_memblock_item { +struct drm_memblock_item { struct list_head owner_list; - drm_hash_item_t user_hash; + struct drm_hash_item user_hash; void *mm_info; - drm_sman_mm_t *mm; + struct drm_sman_mm *mm; struct drm_sman *sman; -} drm_memblock_item_t; +}; -typedef struct drm_sman { - drm_sman_mm_t *mm; +struct drm_sman { + struct drm_sman_mm *mm; int num_managers; - drm_open_hash_t owner_hash_tab; - drm_open_hash_t user_hash_tab; + struct drm_open_hash owner_hash_tab; + struct drm_open_hash user_hash_tab; struct list_head owner_items; -} drm_sman_t; +}; /* * Take down a memory manager. This function should only be called after a * successful init and after a call to drm_sman_cleanup. */ -extern void drm_sman_takedown(drm_sman_t * sman); +extern void drm_sman_takedown(struct drm_sman * sman); /* * Allocate structures for a manager. @@ -112,7 +112,7 @@ extern void drm_sman_takedown(drm_sman_t * sman); * */ -extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers, +extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order); /* @@ -120,7 +120,7 @@ extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers, * manager unless a customized allogator is used. 
*/ -extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager, +extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size); /* @@ -129,23 +129,23 @@ extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager, * so it can be destroyed after this call. */ -extern int drm_sman_set_manager(drm_sman_t * sman, unsigned int mananger, - drm_sman_mm_t * allocator); +extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger, + struct drm_sman_mm * allocator); /* * Allocate a memory block. Aligment is not implemented yet. */ -extern drm_memblock_item_t *drm_sman_alloc(drm_sman_t * sman, - unsigned int manager, - unsigned long size, - unsigned alignment, - unsigned long owner); +extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman, + unsigned int manager, + unsigned long size, + unsigned alignment, + unsigned long owner); /* * Free a memory block identified by its user hash key. */ -extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key); +extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key); /* * returns 1 iff there are no stale memory blocks associated with this owner. @@ -154,7 +154,7 @@ extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key); * resources associated with owner. */ -extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner); +extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner); /* * Frees all stale memory blocks associated with this owner. Note that this @@ -164,13 +164,13 @@ extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner); * is not going to be referenced anymore. */ -extern void drm_sman_owner_cleanup(drm_sman_t * sman, unsigned long owner); +extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner); /* * Frees all stale memory blocks associated with the memory manager. * See idling above. 
*/ -extern void drm_sman_cleanup(drm_sman_t * sman); +extern void drm_sman_cleanup(struct drm_sman * sman); #endif diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 306ed453..edbf8bf4 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -93,7 +93,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) mutex_lock(&dev->struct_mutex); #if defined(__linux__) && defined(CONFIG_FB_SIS) { - drm_sman_mm_t sman_mm; + struct drm_sman_mm sman_mm; sman_mm.private = (void *)0xFFFFFFFF; sman_mm.allocate = sis_sman_mm_allocate; sman_mm.free = sis_sman_mm_free; @@ -129,7 +129,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data; drm_sis_mem_t mem; int retval = 0; - drm_memblock_item_t *item; + struct drm_memblock_item *item; DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem)); diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index 48f5fd09..1ac51050 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -129,7 +129,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) drm_via_mem_t mem; int retval = 0; - drm_memblock_item_t *item; + struct drm_memblock_item *item; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned long tmpSize; -- cgit v1.2.3 From be85ad0333b0c28129c2e4635f92780816308aa6 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:37:02 +1000 Subject: drm: detypedef ttm/bo/fence code --- linux-core/drmP.h | 8 +- linux-core/drm_agpsupport.c | 12 +-- linux-core/drm_bo.c | 242 ++++++++++++++++++++++---------------------- linux-core/drm_bo_move.c | 58 +++++------ linux-core/drm_compat.c | 18 ++-- linux-core/drm_fence.c | 124 +++++++++++------------ linux-core/drm_objects.h | 126 +++++++++++------------ linux-core/drm_proc.c | 4 +- linux-core/drm_ttm.c | 40 ++++---- linux-core/drm_vm.c | 12 +-- linux-core/i915_buffer.c | 26 ++--- linux-core/i915_drv.c | 4 +- linux-core/i915_fence.c | 10 +- linux-core/via_buffer.c | 8 +- linux-core/via_fence.c | 8 
+- 15 files changed, 350 insertions(+), 350 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 87a194af..142a04a1 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -806,8 +806,8 @@ struct drm_device { unsigned int agp_buffer_token; struct drm_head primary; /**< primary screen head */ - drm_fence_manager_t fm; - drm_buffer_manager_t bm; + struct drm_fence_manager fm; + struct drm_buffer_manager bm; /** \name Drawable information */ /*@{ */ @@ -818,7 +818,7 @@ struct drm_device { #if __OS_HAS_AGP struct drm_agp_ttm_backend { - drm_ttm_backend_t backend; + struct drm_ttm_backend backend; DRM_AGP_MEM *mem; struct agp_bridge_data *bridge; int populated; @@ -1103,7 +1103,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size extern int drm_agp_free_memory(DRM_AGP_MEM * handle); extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); -extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev); +extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev); /* Stub support (drm_stub.h) */ extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 541d95cd..57c88638 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -554,7 +554,7 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle) #define AGP_REQUIRED_MAJOR 0 #define AGP_REQUIRED_MINOR 102 -static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) { +static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) { return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1); } @@ -590,7 +590,7 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p return 0; } -static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, +static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, unsigned long offset, int cached) { @@ -612,7 +612,7 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, return ret; } -static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { +static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) { struct drm_agp_ttm_backend *agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); @@ -624,7 +624,7 @@ static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { return 0; } -static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) { struct drm_agp_ttm_backend *agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); @@ -640,7 +640,7 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { agp_be->mem = NULL; } -static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) { struct drm_agp_ttm_backend *agp_be; @@ -656,7 +656,7 @@ static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { } } -static drm_ttm_backend_func_t agp_ttm_backend = +static struct drm_ttm_backend_func agp_ttm_backend = { .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, .populate = drm_agp_populate, diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 30664632..a81dfbde 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -49,10 +49,10 @@ * */ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo); -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_unmap_virtual(drm_buffer_object_t * bo); +static void drm_bo_destroy_locked(struct drm_buffer_object * bo); +static int drm_bo_setup_vm_locked(struct drm_buffer_object 
* bo); +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo); +static void drm_bo_unmap_virtual(struct drm_buffer_object * bo); static inline uint32_t drm_bo_type_flags(unsigned type) { @@ -63,9 +63,9 @@ static inline uint32_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. */ -void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); DRM_ASSERT_LOCKED(&bo->mutex); @@ -74,9 +74,9 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) list_add_tail(&bo->pinned_lru, &man->pinned); } -void drm_bo_add_to_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); @@ -89,7 +89,7 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) } } -static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) +static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -112,7 +112,7 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) return 0; } -static void drm_bo_vm_post_move(drm_buffer_object_t * bo) +static void drm_bo_vm_post_move(struct drm_buffer_object * bo) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -133,7 +133,7 @@ static void drm_bo_vm_post_move(drm_buffer_object_t * bo) * Call bo->mutex locked. 
*/ -static int drm_bo_add_ttm(drm_buffer_object_t * bo) +static int drm_bo_add_ttm(struct drm_buffer_object * bo) { struct drm_device *dev = bo->dev; int ret = 0; @@ -164,16 +164,16 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) return ret; } -static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, +static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int evict, int no_wait) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); int new_is_pci = drm_mem_reg_is_pci(dev, mem); - drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type]; - drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; + struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; + struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; int ret = 0; if (old_is_pci || new_is_pci) @@ -201,7 +201,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; uint64_t save_flags = old_mem->flags; uint64_t save_mask = old_mem->mask; @@ -266,7 +266,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, * Wait until the buffer is idle. 
*/ -int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, +int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait) { int ret; @@ -292,10 +292,10 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, return 0; } -static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) +static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; if (bo->fence) { if (bm->nice_mode) { @@ -327,10 +327,10 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) * fence object and removing from lru lists and memory managers. */ -static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) +static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -389,10 +389,10 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) * to the buffer object. Then destroy it. 
*/ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo) +static void drm_bo_destroy_locked(struct drm_buffer_object * bo) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -440,17 +440,17 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; struct list_head *list, *next; list_for_each_safe(list, next, &bm->ddestroy) { - entry = list_entry(list, drm_buffer_object_t, ddestroy); + entry = list_entry(list, struct drm_buffer_object, ddestroy); nentry = NULL; if (next != &bm->ddestroy) { - nentry = list_entry(next, drm_buffer_object_t, + nentry = list_entry(next, struct drm_buffer_object, ddestroy); atomic_inc(&nentry->usage); } @@ -471,10 +471,10 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) struct drm_device *dev = (struct drm_device *) data; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; #else - drm_buffer_manager_t *bm = - container_of(work, drm_buffer_manager_t, wq.work); + struct drm_buffer_manager *bm = + container_of(work, struct drm_buffer_manager, wq.work); struct drm_device *dev = container_of(bm, struct drm_device, bm); #endif @@ -493,7 +493,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) +void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; bo = NULL; @@ -507,8 +507,8 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_object * uo) { - 
drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); @@ -516,7 +516,7 @@ static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_obj drm_bo_usage_deref_locked(&bo); } -static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) +static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; struct drm_device *dev = tmp_bo->dev; @@ -538,13 +538,13 @@ static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, - drm_fence_object_t * fence, - drm_fence_object_t ** used_fence) + struct drm_fence_object * fence, + struct drm_fence_object ** used_fence) { struct drm_device *dev = priv->head->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; uint32_t fence_type = 0; int count = 0; int ret = 0; @@ -602,7 +602,7 @@ int drm_fence_buffer_objects(struct drm_file * priv, l = f_list.next; while (l != &f_list) { prefetch(l->next); - entry = list_entry(l, drm_buffer_object_t, lru); + entry = list_entry(l, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -635,12 +635,12 @@ EXPORT_SYMBOL(drm_fence_buffer_objects); * bo->mutex locked */ -static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, +static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, int no_wait) { int ret = 0; struct drm_device *dev = bo->dev; - drm_bo_mem_reg_t evict_mem; + struct drm_bo_mem_reg evict_mem; /* * Someone might have modified the buffer before we took the buffer mutex. 
@@ -706,13 +706,13 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, } static int drm_bo_mem_force_space(struct drm_device * dev, - drm_bo_mem_reg_t * mem, + struct drm_bo_mem_reg * mem, uint32_t mem_type, int no_wait) { struct drm_mm_node *node; - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *entry; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *entry; + struct drm_mem_type_manager *man = &bm->man[mem_type]; struct list_head *lru; unsigned long num_pages = mem->num_pages; int ret; @@ -728,7 +728,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, if (lru->next == lru) break; - entry = list_entry(lru->next, drm_buffer_object_t, lru); + entry = list_entry(lru->next, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -754,7 +754,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, return 0; } -static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, +static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, uint32_t mem_type, uint32_t mask, uint32_t * res_mask) { @@ -791,12 +791,12 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, return 1; } -int drm_bo_mem_space(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, int no_wait) +int drm_bo_mem_space(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int no_wait) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; @@ -883,7 +883,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, EXPORT_SYMBOL(drm_bo_mem_space); -static int drm_bo_new_mask(drm_buffer_object_t * bo, +static int drm_bo_new_mask(struct drm_buffer_object * 
bo, uint64_t new_mask, uint32_t hint) { uint32_t new_props; @@ -921,11 +921,11 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo, * Call dev->struct_mutex locked. */ -drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, +struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * priv, uint32_t handle, int check_owner) { struct drm_user_object *uo; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; uo = drm_lookup_user_object(priv, handle); @@ -939,7 +939,7 @@ drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, return NULL; } - bo = drm_user_object_entry(uo, drm_buffer_object_t, base); + bo = drm_user_object_entry(uo, struct drm_buffer_object, base); atomic_inc(&bo->usage); return bo; } @@ -950,9 +950,9 @@ drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, * Doesn't do any fence flushing as opposed to the drm_bo_busy function. */ -static int drm_bo_quick_busy(drm_buffer_object_t * bo) +static int drm_bo_quick_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -970,9 +970,9 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo) * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. */ -static int drm_bo_busy(drm_buffer_object_t * bo) +static int drm_bo_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -990,7 +990,7 @@ static int drm_bo_busy(drm_buffer_object_t * bo) return 0; } -static int drm_bo_read_cached(drm_buffer_object_t * bo) +static int drm_bo_read_cached(struct drm_buffer_object * bo) { int ret = 0; @@ -1004,7 +1004,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo) * Wait until a buffer is unmapped. 
*/ -static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) +static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait) { int ret = 0; @@ -1020,7 +1020,7 @@ static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) return ret; } -static int drm_bo_check_unfenced(drm_buffer_object_t * bo) +static int drm_bo_check_unfenced(struct drm_buffer_object * bo) { int ret; @@ -1042,7 +1042,7 @@ static int drm_bo_check_unfenced(drm_buffer_object_t * bo) * the buffer "unfenced" after validating, but before fencing. */ -static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, +static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, int eagain_if_wait) { int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); @@ -1075,7 +1075,7 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, * Bo locked. */ -static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, +static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, struct drm_bo_info_rep *rep) { rep->handle = bo->base.hash.key; @@ -1106,7 +1106,7 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, uint32_t map_flags, unsigned hint, struct drm_bo_info_rep *rep) { - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; struct drm_device *dev = priv->head->dev; int ret = 0; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1186,7 +1186,7 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; struct drm_ref_object *ro; int ret = 0; @@ -1219,8 +1219,8 @@ static void drm_buffer_user_object_unmap(struct drm_file * priv, struct drm_user_object * uo, drm_ref_t action) { - drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct 
drm_buffer_object, base); /* * We DON'T want to take the bo->lock here, because we want to @@ -1238,13 +1238,13 @@ static void drm_buffer_user_object_unmap(struct drm_file * priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. */ -int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, +int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; - drm_bo_mem_reg_t mem; + struct drm_bo_mem_reg mem; /* * Flush outstanding fences. */ @@ -1300,7 +1300,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, return ret; } -static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) +static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) { uint32_t flag_diff = (mem->mask ^ mem->flags); @@ -1318,10 +1318,10 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) return 1; } -static int drm_bo_check_fake(struct drm_device * dev, drm_bo_mem_reg_t * mem) +static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; uint32_t i; @@ -1360,13 +1360,13 @@ static int drm_bo_check_fake(struct drm_device * dev, drm_bo_mem_reg_t * mem) * bo locked. 
*/ -static int drm_buffer_object_validate(drm_buffer_object_t * bo, +static int drm_buffer_object_validate(struct drm_buffer_object * bo, uint32_t fence_class, int move_unfenced, int no_wait) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; uint32_t ftype; int ret; @@ -1496,7 +1496,7 @@ static int drm_bo_handle_validate(struct drm_file * priv, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1536,7 +1536,7 @@ static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); @@ -1559,7 +1559,7 @@ static int drm_bo_handle_wait(struct drm_file *priv, uint32_t handle, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; @@ -1594,10 +1594,10 @@ int drm_buffer_object_create(struct drm_device *dev, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, - drm_buffer_object_t ** buf_obj) + struct drm_buffer_object ** buf_obj) { - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *bo; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *bo; int ret = 0; unsigned long num_pages; @@ -1672,7 +1672,7 @@ int drm_buffer_object_create(struct drm_device *dev, return ret; } -static int drm_bo_add_user_object(struct drm_file * priv, drm_buffer_object_t * bo, +static int drm_bo_add_user_object(struct drm_file * priv, struct drm_buffer_object * bo, int shareable) { struct drm_device *dev = 
priv->head->dev; @@ -1769,7 +1769,7 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) struct drm_bo_create_arg arg; struct drm_bo_create_req *req = &arg.d.req; struct drm_bo_info_rep *rep = &arg.d.rep; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; int ret = 0; if (!dev->bm.initialized) { @@ -1975,16 +1975,16 @@ int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) static void drm_bo_clean_unfenced(struct drm_device *dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; struct list_head *head, *list; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; head = &bm->unfenced; list = head->next; while(list != head) { prefetch(list->next); - entry = list_entry(list, drm_buffer_object_t, lru); + entry = list_entry(list, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); @@ -1999,7 +1999,7 @@ static void drm_bo_clean_unfenced(struct drm_device *dev) } } -static int drm_bo_leave_list(drm_buffer_object_t * bo, +static int drm_bo_leave_list(struct drm_buffer_object * bo, uint32_t mem_type, int free_pinned, int allow_errors) { @@ -2050,13 +2050,13 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, } -static drm_buffer_object_t *drm_bo_entry(struct list_head *list, +static struct drm_buffer_object *drm_bo_entry(struct list_head *list, int pinned_list) { if (pinned_list) - return list_entry(list, drm_buffer_object_t, pinned_lru); + return list_entry(list, struct drm_buffer_object, pinned_lru); else - return list_entry(list, drm_buffer_object_t, lru); + return list_entry(list, struct drm_buffer_object, lru); } /* @@ -2071,7 +2071,7 @@ static int drm_bo_force_list_clean(struct drm_device * dev, int pinned_list) { struct list_head *list, *next, *prev; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; int ret; int do_restart; @@ -2130,8 +2130,8 @@ restart: int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) { - drm_buffer_manager_t *bm = 
&dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; int ret = -EINVAL; if (mem_type >= DRM_BO_MEM_TYPES) { @@ -2173,8 +2173,8 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) { int ret; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type); @@ -2200,9 +2200,9 @@ int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; if (type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory type %d\n", type); @@ -2247,10 +2247,10 @@ EXPORT_SYMBOL(drm_bo_init_mm); int drm_bo_driver_finish(struct drm_device * dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; unsigned i = DRM_BO_MEM_TYPES; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -2298,8 +2298,8 @@ int drm_bo_driver_finish(struct drm_device * dev) int drm_bo_driver_init(struct drm_device * dev) { - drm_bo_driver_t *driver = dev->driver->bo_driver; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; mutex_lock(&dev->bm.init_mutex); @@ -2339,8 +2339,8 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_init_arg arg; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + 
struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2396,8 +2396,8 @@ int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_type_arg arg; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2438,7 +2438,7 @@ int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_type_arg arg; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2465,7 +2465,7 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_type_arg arg; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2492,10 +2492,10 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. */ -int drm_mem_reg_is_pci(struct drm_device * dev, drm_bo_mem_reg_t * mem) +int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { if (mem->mem_type == DRM_BO_MEM_LOCAL) @@ -2526,13 +2526,13 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci); * Otherwise returns zero. 
*/ -int drm_bo_pci_offset(struct drm_device * dev, - drm_bo_mem_reg_t * mem, +int drm_bo_pci_offset(struct drm_device *dev, + struct drm_bo_mem_reg *mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; *bus_size = 0; if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) @@ -2555,7 +2555,7 @@ int drm_bo_pci_offset(struct drm_device * dev, * Call bo->mutex locked. */ -void drm_bo_unmap_virtual(drm_buffer_object_t * bo) +void drm_bo_unmap_virtual(struct drm_buffer_object * bo) { struct drm_device *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; @@ -2567,7 +2567,7 @@ void drm_bo_unmap_virtual(drm_buffer_object_t * bo) unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo) { struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; @@ -2593,7 +2593,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) drm_bo_usage_deref_locked(&bo); } -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) +static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) { struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 1e0d26ce..5e21173c 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -35,9 +35,9 @@ * have not been requested to free also pinned regions. 
*/ -static void drm_bo_free_old_node(drm_buffer_object_t * bo) +static void drm_bo_free_old_node(struct drm_buffer_object * bo) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { mutex_lock(&bo->dev->struct_mutex); @@ -48,11 +48,11 @@ static void drm_bo_free_old_node(drm_buffer_object_t * bo) old_mem->mm_node = NULL; } -int drm_bo_move_ttm(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_ttm(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; int ret; @@ -102,11 +102,11 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. */ -int drm_mem_reg_ioremap(struct drm_device * dev, drm_bo_mem_reg_t * mem, +int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void **virtual) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; unsigned long bus_offset; unsigned long bus_size; unsigned long bus_base; @@ -137,11 +137,11 @@ int drm_mem_reg_ioremap(struct drm_device * dev, drm_bo_mem_reg_t * mem, * Call bo->mutex locked. 
*/ -void drm_mem_reg_iounmap(struct drm_device * dev, drm_bo_mem_reg_t * mem, +void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void *virtual) { - drm_buffer_manager_t *bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm; + struct drm_mem_type_manager *man; bm = &dev->bm; man = &bm->man[mem->mem_type]; @@ -164,7 +164,7 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page) return 0; } -static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) +static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page) { struct page *d = drm_ttm_get_page(ttm, page); void *dst; @@ -182,7 +182,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) return 0; } -static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) +static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page) { struct page *s = drm_ttm_get_page(ttm, page); void *src; @@ -200,14 +200,14 @@ static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) return 0; } -int drm_bo_move_memcpy(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_memcpy(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { struct drm_device *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; - drm_bo_mem_reg_t old_copy = *old_mem; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; + struct drm_bo_mem_reg old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -281,12 +281,12 @@ EXPORT_SYMBOL(drm_bo_move_memcpy); * object. Call bo->mutex locked. 
*/ -int drm_buffer_object_transfer(drm_buffer_object_t * bo, - drm_buffer_object_t ** new_obj) +int drm_buffer_object_transfer(struct drm_buffer_object * bo, + struct drm_buffer_object ** new_obj) { - drm_buffer_object_t *fbo; + struct drm_buffer_object *fbo; struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); if (!fbo) @@ -323,20 +323,20 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo, * We cannot restart until it has finished. */ -int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, +int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, int evict, int no_wait, uint32_t fence_class, uint32_t fence_type, - uint32_t fence_flags, drm_bo_mem_reg_t * new_mem) + uint32_t fence_flags, struct drm_bo_mem_reg * new_mem) { struct drm_device *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_bo_mem_reg *old_mem = &bo->mem; int ret; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; - drm_buffer_object_t *old_obj; + struct drm_buffer_object *old_obj; if (bo->fence) drm_fence_usage_deref_unlocked(&bo->fence); diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 5d1d62fa..38ca497f 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -201,7 +201,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, struct fault_data *data) { unsigned long address = data->address; - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; drm_ttm_t *ttm; @@ -351,7 +351,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type) { - drm_buffer_object_t 
*bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page; drm_ttm_t *ttm; @@ -395,7 +395,7 @@ out_unlock: int drm_bo_map_bound(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data; int ret = 0; unsigned long bus_base; unsigned long bus_offset; @@ -418,7 +418,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma) } -int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n_entry; vma_entry_t *v_entry; @@ -454,7 +454,7 @@ int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) return 0; } -void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n; vma_entry_t *v_entry, *v_n; @@ -486,7 +486,7 @@ void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) -int drm_bo_lock_kmm(drm_buffer_object_t * bo) +int drm_bo_lock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; int lock_ok = 1; @@ -518,7 +518,7 @@ int drm_bo_lock_kmm(drm_buffer_object_t * bo) return -EAGAIN; } -void drm_bo_unlock_kmm(drm_buffer_object_t * bo) +void drm_bo_unlock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; @@ -529,7 +529,7 @@ void drm_bo_unlock_kmm(drm_buffer_object_t * bo) } } -int drm_bo_remap_bound(drm_buffer_object_t *bo) +int drm_bo_remap_bound(struct drm_buffer_object *bo) { vma_entry_t *v_entry; int ret = 0; @@ -545,7 +545,7 @@ int drm_bo_remap_bound(drm_buffer_object_t *bo) return ret; } -void drm_bo_finish_unmap(drm_buffer_object_t *bo) +void drm_bo_finish_unmap(struct drm_buffer_object *bo) { vma_entry_t *v_entry; diff --git 
a/linux-core/drm_fence.c b/linux-core/drm_fence.c index f925621a..9b2fa405 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -40,11 +40,11 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class, int wake = 0; uint32_t diff; uint32_t relevant; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; struct list_head *head; - drm_fence_object_t *fence, *next; + struct drm_fence_object *fence, *next; int found = 0; int is_exe = (type & DRM_FENCE_TYPE_EXE); int ge_last_exe; @@ -116,7 +116,7 @@ EXPORT_SYMBOL(drm_fence_handler); static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; write_lock_irqsave(&fm->lock, flags); @@ -124,11 +124,11 @@ static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) write_unlock_irqrestore(&fm->lock, flags); } -void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_locked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; DRM_ASSERT_LOCKED(&dev->struct_mutex); *fence = NULL; @@ -142,11 +142,11 @@ void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) } } -void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; *fence = NULL; if (atomic_dec_and_test(&tmp_fence->usage)) { @@ 
-182,20 +182,20 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base) { - drm_fence_object_t *fence = - drm_user_object_entry(base, drm_fence_object_t, base); + struct drm_fence_object *fence = + drm_user_object_entry(base, struct drm_fence_object, base); drm_fence_usage_deref_locked(&fence); } -int drm_fence_object_signaled(drm_fence_object_t * fence, +int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t mask, int poke_flush) { unsigned long flags; int signaled; struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; if (poke_flush) driver->poke_flush(dev, fence->class); @@ -207,8 +207,8 @@ int drm_fence_object_signaled(drm_fence_object_t * fence, return signaled; } -static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, - drm_fence_driver_t * driver, uint32_t sequence) +static void drm_fence_flush_exe(struct drm_fence_class_manager * fc, + struct drm_fence_driver * driver, uint32_t sequence) { uint32_t diff; @@ -224,13 +224,13 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, } } -int drm_fence_object_flush(drm_fence_object_t * fence, +int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; unsigned long flags; if (type & ~fence->type) { @@ -264,12 +264,12 @@ int drm_fence_object_flush(drm_fence_object_t * fence, void drm_fence_flush_old(struct drm_device * 
dev, uint32_t class, uint32_t sequence) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t old_sequence; unsigned long flags; - drm_fence_object_t *fence; + struct drm_fence_object *fence; uint32_t diff; write_lock_irqsave(&fm->lock, flags); @@ -290,7 +290,7 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t seque mutex_unlock(&dev->struct_mutex); return; } - fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring)); + fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring)); mutex_unlock(&dev->struct_mutex); diff = (old_sequence - fence->sequence) & driver->sequence_mask; read_unlock_irqrestore(&fm->lock, flags); @@ -302,13 +302,13 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t seque EXPORT_SYMBOL(drm_fence_flush_old); -static int drm_fence_lazy_wait(drm_fence_object_t *fence, +static int drm_fence_lazy_wait(struct drm_fence_object *fence, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; int signaled; unsigned long _end = jiffies + 3*DRM_HZ; int ret = 0; @@ -336,11 +336,11 @@ static int drm_fence_lazy_wait(drm_fence_object_t *fence, return 0; } -int drm_fence_object_wait(drm_fence_object_t * fence, +int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_driver *driver = 
dev->driver->fence_driver; int ret = 0; unsigned long _end; int signaled; @@ -403,13 +403,13 @@ int drm_fence_object_wait(drm_fence_object_t * fence, return 0; } -int drm_fence_object_emit(drm_fence_object_t * fence, +int drm_fence_object_emit(struct drm_fence_object * fence, uint32_t fence_flags, uint32_t class, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; unsigned long flags; uint32_t sequence; uint32_t native_type; @@ -438,11 +438,11 @@ int drm_fence_object_emit(drm_fence_object_t * fence, static int drm_fence_object_init(struct drm_device * dev, uint32_t class, uint32_t type, uint32_t fence_flags, - drm_fence_object_t * fence) + struct drm_fence_object * fence) { int ret = 0; unsigned long flags; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; mutex_lock(&dev->struct_mutex); atomic_set(&fence->usage, 1); @@ -471,7 +471,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class, return ret; } -int drm_fence_add_user_object(struct drm_file * priv, drm_fence_object_t * fence, +int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable) { struct drm_device *dev = priv->head->dev; @@ -492,11 +492,11 @@ out: EXPORT_SYMBOL(drm_fence_add_user_object); int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type, - unsigned flags, drm_fence_object_t ** c_fence) + unsigned flags, struct drm_fence_object ** c_fence) { - drm_fence_object_t *fence; + struct drm_fence_object *fence; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); if 
(!fence) @@ -516,9 +516,9 @@ EXPORT_SYMBOL(drm_fence_object_create); void drm_fence_manager_init(struct drm_device * dev) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *class; - drm_fence_driver_t *fed = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *class; + struct drm_fence_driver *fed = dev->driver->fence_driver; int i; rwlock_init(&fm->lock); @@ -548,11 +548,11 @@ void drm_fence_manager_takedown(struct drm_device * dev) { } -drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) +struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) { struct drm_device *dev = priv->head->dev; struct drm_user_object *uo; - drm_fence_object_t *fence; + struct drm_fence_object *fence; mutex_lock(&dev->struct_mutex); uo = drm_lookup_user_object(priv, handle); @@ -560,7 +560,7 @@ drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t han mutex_unlock(&dev->struct_mutex); return NULL; } - fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base)); + fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base)); mutex_unlock(&dev->struct_mutex); return fence; } @@ -569,9 +569,9 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -617,7 +617,7 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; struct drm_user_object *uo; ret = 0; @@ -645,9 +645,9 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - 
drm_fence_object_t *fence; + struct drm_fence_object *fence; struct drm_user_object *uo; unsigned long flags; ret = 0; @@ -679,7 +679,7 @@ int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; ret = 0; @@ -696,9 +696,9 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -728,9 +728,9 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -762,9 +762,9 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -798,9 +798,9 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -833,9 +833,9 @@ int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index c4428a7b..441c19f2 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -139,7 +139,7 @@ extern int drm_user_object_unref(struct drm_file * priv, uint32_t 
user_token, * Fence objects. (drm_fence.c) */ -typedef struct drm_fence_object { +struct drm_fence_object { struct drm_user_object base; struct drm_device *dev; atomic_t usage; @@ -156,29 +156,29 @@ typedef struct drm_fence_object { uint32_t sequence; uint32_t flush_mask; uint32_t submitted_flush; -} drm_fence_object_t; +}; #define _DRM_FENCE_CLASSES 8 #define _DRM_FENCE_TYPE_EXE 0x00 -typedef struct drm_fence_class_manager { +struct drm_fence_class_manager { struct list_head ring; uint32_t pending_flush; wait_queue_head_t fence_queue; int pending_exe_flush; uint32_t last_exe_flush; uint32_t exe_flush_sequence; -} drm_fence_class_manager_t; +}; -typedef struct drm_fence_manager { +struct drm_fence_manager { int initialized; rwlock_t lock; - drm_fence_class_manager_t class[_DRM_FENCE_CLASSES]; + struct drm_fence_class_manager class[_DRM_FENCE_CLASSES]; uint32_t num_classes; atomic_t count; -} drm_fence_manager_t; +}; -typedef struct drm_fence_driver { +struct drm_fence_driver { uint32_t num_classes; uint32_t wrap_diff; uint32_t flush_diff; @@ -189,7 +189,7 @@ typedef struct drm_fence_driver { int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * breadcrumb, uint32_t * native_type); void (*poke_flush) (struct drm_device * dev, uint32_t class); -} drm_fence_driver_t; +}; extern void drm_fence_handler(struct drm_device *dev, uint32_t class, uint32_t sequence, uint32_t type); @@ -197,21 +197,21 @@ extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, uint32_t sequence); -extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type); -extern int drm_fence_object_signaled(drm_fence_object_t * fence, +extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type); +extern int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t type, int flush); -extern void 
drm_fence_usage_deref_locked(drm_fence_object_t ** fence); -extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence); +extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence); +extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence); extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, struct drm_fence_object *src); -extern int drm_fence_object_wait(drm_fence_object_t * fence, +extern int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, - drm_fence_object_t ** c_fence); + struct drm_fence_object ** c_fence); extern int drm_fence_add_user_object(struct drm_file * priv, - drm_fence_object_t * fence, int shareable); + struct drm_fence_object * fence, int shareable); extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS); @@ -243,7 +243,7 @@ extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS); #define DRM_BE_FLAG_BOUND_CACHED 0x00000002 struct drm_ttm_backend; -typedef struct drm_ttm_backend_func { +struct drm_ttm_backend_func { int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend); int (*populate) (struct drm_ttm_backend * backend, unsigned long num_pages, struct page ** pages); @@ -252,16 +252,16 @@ typedef struct drm_ttm_backend_func { unsigned long offset, int cached); int (*unbind) (struct drm_ttm_backend * backend); void (*destroy) (struct drm_ttm_backend * backend); -} drm_ttm_backend_func_t; +}; -typedef struct drm_ttm_backend { +struct drm_ttm_backend { uint32_t flags; int mem_type; - drm_ttm_backend_func_t *func; -} drm_ttm_backend_t; + struct drm_ttm_backend_func *func; +}; -typedef struct drm_ttm { +struct drm_ttm { struct page **pages; uint32_t page_flags; unsigned long 
num_pages; @@ -270,7 +270,7 @@ typedef struct drm_ttm { struct drm_device *dev; int destroy; uint32_t mapping_offset; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; enum { ttm_bound, ttm_evicted, @@ -278,14 +278,14 @@ typedef struct drm_ttm { ttm_unpopulated, } state; -} drm_ttm_t; +}; -extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset); -extern void drm_ttm_unbind(drm_ttm_t * ttm); -extern void drm_ttm_evict(drm_ttm_t * ttm); -extern void drm_ttm_fixup_caching(drm_ttm_t * ttm); -extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index); +extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); +extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset); +extern void drm_ttm_unbind(struct drm_ttm * ttm); +extern void drm_ttm_evict(struct drm_ttm * ttm); +extern void drm_ttm_fixup_caching(struct drm_ttm * ttm); +extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index); /* * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, @@ -293,7 +293,7 @@ extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index); * when the last vma exits. */ -extern int drm_destroy_ttm(drm_ttm_t * ttm); +extern int drm_destroy_ttm(struct drm_ttm * ttm); #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -316,7 +316,7 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm); * Buffer objects. 
(drm_bo.c, drm_bo_move.c) */ -typedef struct drm_bo_mem_reg { +struct drm_bo_mem_reg { struct drm_mm_node *mm_node; unsigned long size; unsigned long num_pages; @@ -324,9 +324,9 @@ typedef struct drm_bo_mem_reg { uint32_t mem_type; uint64_t flags; uint64_t mask; -} drm_bo_mem_reg_t; +}; -typedef struct drm_buffer_object { +struct drm_buffer_object { struct drm_device *dev; struct drm_user_object base; @@ -340,14 +340,14 @@ typedef struct drm_buffer_object { enum drm_bo_type type; unsigned long offset; atomic_t mapped; - drm_bo_mem_reg_t mem; + struct drm_bo_mem_reg mem; struct list_head lru; struct list_head ddestroy; uint32_t fence_type; uint32_t fence_class; - drm_fence_object_t *fence; + struct drm_fence_object *fence; uint32_t priv_flags; wait_queue_head_t event_queue; struct mutex mutex; @@ -359,7 +359,7 @@ typedef struct drm_buffer_object { /* For vm */ - drm_ttm_t *ttm; + struct drm_ttm *ttm; struct drm_map_list map_list; uint32_t memory_type; unsigned long bus_offset; @@ -372,12 +372,12 @@ typedef struct drm_buffer_object { struct list_head p_mm_list; #endif -} drm_buffer_object_t; +}; #define _DRM_BO_FLAG_UNFENCED 0x00000001 #define _DRM_BO_FLAG_EVICTED 0x00000002 -typedef struct drm_mem_type_manager { +struct drm_mem_type_manager { int has_type; int use_type; struct drm_mm manager; @@ -388,7 +388,7 @@ typedef struct drm_mem_type_manager { unsigned long io_offset; unsigned long io_size; void *io_addr; -} drm_mem_type_manager_t; +}; #define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ #define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ @@ -398,13 +398,13 @@ typedef struct drm_mem_type_manager { #define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ #define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ -typedef struct drm_buffer_manager { +struct drm_buffer_manager { struct mutex init_mutex; struct mutex evict_mutex; int nice_mode; int initialized; struct drm_file *last_to_validate; - 
drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; + struct drm_mem_type_manager man[DRM_BO_MEM_TYPES]; struct list_head unfenced; struct list_head ddestroy; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) @@ -415,23 +415,23 @@ typedef struct drm_buffer_manager { uint32_t fence_type; unsigned long cur_pages; atomic_t count; -} drm_buffer_manager_t; +}; -typedef struct drm_bo_driver { +struct drm_bo_driver { const uint32_t *mem_type_prio; const uint32_t *mem_busy_prio; uint32_t num_mem_type_prio; uint32_t num_mem_busy_prio; - drm_ttm_backend_t *(*create_ttm_backend_entry) + struct drm_ttm_backend *(*create_ttm_backend_entry) (struct drm_device * dev); int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type); int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); int (*init_mem_type) (struct drm_device * dev, uint32_t type, - drm_mem_type_manager_t * man); + struct drm_mem_type_manager * man); uint32_t(*evict_mask) (struct drm_buffer_object *bo); int (*move) (struct drm_buffer_object * bo, int evict, int no_wait, struct drm_bo_mem_reg * new_mem); -} drm_bo_driver_t; +}; /* * buffer objects (drm_bo.c) @@ -455,24 +455,24 @@ extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, - drm_bo_mem_reg_t * mem, + struct drm_bo_mem_reg * mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size); -extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem); +extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem); -extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo); +extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); extern int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, - drm_fence_object_t * fence, - drm_fence_object_t ** used_fence); -extern void 
drm_bo_add_to_lru(drm_buffer_object_t * bo); -extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + struct drm_fence_object * fence, + struct drm_fence_object ** used_fence); +extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); +extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait); -extern int drm_bo_mem_space(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, int no_wait); -extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, +extern int drm_bo_mem_space(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int no_wait); +extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced); /* @@ -480,18 +480,18 @@ extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, * drm_bo_move.c */ -extern int drm_bo_move_ttm(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem); -extern int drm_bo_move_memcpy(drm_buffer_object_t * bo, +extern int drm_bo_move_ttm(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_move_memcpy(struct drm_buffer_object * bo, int evict, - int no_wait, drm_bo_mem_reg_t * new_mem); -extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, + int no_wait, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, int evict, int no_wait, uint32_t fence_class, uint32_t fence_type, uint32_t fence_flags, - drm_bo_mem_reg_t * new_mem); + struct drm_bo_mem_reg * new_mem); #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index f33bd93d..3f9cb028 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -434,8 +434,8 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request, { struct drm_device *dev = (struct drm_device *) data; int 
len = 0; - drm_buffer_manager_t *bm = &dev->bm; - drm_fence_manager_t *fm = &dev->fm; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_fence_manager *fm = &dev->fm; drm_u64_t used_mem; drm_u64_t low_mem; drm_u64_t high_mem; diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 31503c9c..60c64cba 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -45,7 +45,7 @@ static void drm_ttm_cache_flush(void) * Use kmalloc if possible. Otherwise fall back to vmalloc. */ -static void ttm_alloc_pages(drm_ttm_t * ttm) +static void ttm_alloc_pages(struct drm_ttm * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ttm->pages = NULL; @@ -66,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t * ttm) } } -static void ttm_free_pages(drm_ttm_t * ttm) +static void ttm_free_pages(struct drm_ttm * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); @@ -105,7 +105,7 @@ static struct page *drm_ttm_alloc_page(void) * for range of pages in a ttm. */ -static int drm_set_caching(drm_ttm_t * ttm, int noncached) +static int drm_set_caching(struct drm_ttm * ttm, int noncached) { int i; struct page **cur_page; @@ -142,12 +142,12 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached) * Free all resources associated with a ttm. 
*/ -int drm_destroy_ttm(drm_ttm_t * ttm) +int drm_destroy_ttm(struct drm_ttm * ttm) { int i; struct page **cur_page; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (!ttm) return 0; @@ -159,7 +159,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm) } if (ttm->pages) { - drm_buffer_manager_t *bm = &ttm->dev->bm; + struct drm_buffer_manager *bm = &ttm->dev->bm; if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) drm_set_caching(ttm, 0); @@ -191,10 +191,10 @@ int drm_destroy_ttm(drm_ttm_t * ttm) return 0; } -struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) +struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index) { struct page *p; - drm_buffer_manager_t *bm = &ttm->dev->bm; + struct drm_buffer_manager *bm = &ttm->dev->bm; p = ttm->pages[index]; if (!p) { @@ -207,11 +207,11 @@ struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) return p; } -static int drm_ttm_populate(drm_ttm_t * ttm) +static int drm_ttm_populate(struct drm_ttm * ttm) { struct page *page; unsigned long i; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (ttm->state != ttm_unpopulated) return 0; @@ -231,10 +231,10 @@ static int drm_ttm_populate(drm_ttm_t * ttm) * Initialize a ttm. */ -drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) +struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size) { - drm_bo_driver_t *bo_driver = dev->driver->bo_driver; - drm_ttm_t *ttm; + struct drm_bo_driver *bo_driver = dev->driver->bo_driver; + struct drm_ttm *ttm; if (!bo_driver) return NULL; @@ -275,9 +275,9 @@ drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) * Unbind a ttm region from the aperture. 
*/ -void drm_ttm_evict(drm_ttm_t * ttm) +void drm_ttm_evict(struct drm_ttm * ttm) { - drm_ttm_backend_t *be = ttm->be; + struct drm_ttm_backend *be = ttm->be; int ret; if (ttm->state == ttm_bound) { @@ -288,11 +288,11 @@ void drm_ttm_evict(drm_ttm_t * ttm) ttm->state = ttm_evicted; } -void drm_ttm_fixup_caching(drm_ttm_t * ttm) +void drm_ttm_fixup_caching(struct drm_ttm * ttm) { if (ttm->state == ttm_evicted) { - drm_ttm_backend_t *be = ttm->be; + struct drm_ttm_backend *be = ttm->be; if (be->func->needs_ub_cache_adjust(be)) { drm_set_caching(ttm, 0); } @@ -300,7 +300,7 @@ void drm_ttm_fixup_caching(drm_ttm_t * ttm) } } -void drm_ttm_unbind(drm_ttm_t * ttm) +void drm_ttm_unbind(struct drm_ttm * ttm) { if (ttm->state == ttm_bound) drm_ttm_evict(ttm); @@ -308,11 +308,11 @@ void drm_ttm_unbind(drm_ttm_t * ttm) drm_ttm_fixup_caching(ttm); } -int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) +int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset) { int ret = 0; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (!ttm) return -EINVAL; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index de2fba1a..265a59d8 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -713,10 +713,10 @@ EXPORT_SYMBOL(drm_mmap); static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, unsigned long address) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; - drm_ttm_t *ttm; + struct drm_ttm *ttm; struct drm_device *dev; unsigned long pfn; int err; @@ -766,7 +766,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { - drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; pfn = ((bus_base + bus_offset) 
>> PAGE_SHIFT) + page_offset; vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); @@ -798,7 +798,7 @@ out_unlock: static void drm_bo_vm_open_locked(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; drm_vm_open_locked(vma); atomic_inc(&bo->usage); @@ -815,7 +815,7 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma) static void drm_bo_vm_open(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; struct drm_device *dev = bo->dev; mutex_lock(&dev->struct_mutex); @@ -831,7 +831,7 @@ static void drm_bo_vm_open(struct vm_area_struct *vma) static void drm_bo_vm_close(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; struct drm_device *dev = bo->dev; drm_vm_close(vma); diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 6aeccfcb..bf500cc6 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -33,12 +33,12 @@ #include "i915_drm.h" #include "i915_drv.h" -drm_ttm_backend_t *i915_create_ttm_backend_entry(struct drm_device * dev) +struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } -int i915_fence_types(drm_buffer_object_t *bo, uint32_t * type) +int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type) { if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; @@ -64,7 +64,7 @@ int i915_invalidate_caches(struct drm_device * dev, uint64_t flags) } int i915_init_mem_type(struct drm_device * dev, uint32_t type, - drm_mem_type_manager_t * man) + struct drm_mem_type_manager * man) { switch (type) { case DRM_BO_MEM_LOCAL: @@ -105,7 +105,7 @@ 
int i915_init_mem_type(struct drm_device * dev, uint32_t type, return 0; } -uint32_t i915_evict_mask(drm_buffer_object_t *bo) +uint32_t i915_evict_mask(struct drm_buffer_object *bo) { switch (bo->mem.mem_type) { case DRM_BO_MEM_LOCAL: @@ -150,10 +150,10 @@ static void i915_emit_copy_blit(struct drm_device * dev, return; } -static int i915_move_blit(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +static int i915_move_blit(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; int dir = 0; if ((old_mem->mem_type == new_mem->mem_type) && @@ -180,11 +180,11 @@ static int i915_move_blit(drm_buffer_object_t * bo, * then blit and subsequently move out again. */ -static int i915_move_flip(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +static int i915_move_flip(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { struct drm_device *dev = bo->dev; - drm_bo_mem_reg_t tmp_mem; + struct drm_bo_mem_reg tmp_mem; int ret; tmp_mem = *new_mem; @@ -216,10 +216,10 @@ out_cleanup: return ret; } -int i915_move(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int i915_move(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 49437066..e337e1d2 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -39,7 +39,7 @@ static struct pci_device_id pciidlist[] = { }; #ifdef I915_HAVE_FENCE -static drm_fence_driver_t i915_fence_driver = { +static struct drm_fence_driver i915_fence_driver = { .num_classes = 1, .wrap_diff = (1U << (BREADCRUMB_BITS - 
1)), .flush_diff = (1U << (BREADCRUMB_BITS - 2)), @@ -55,7 +55,7 @@ static drm_fence_driver_t i915_fence_driver = { static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL}; -static drm_bo_driver_t i915_bo_driver = { +static struct drm_bo_driver i915_bo_driver = { .mem_type_prio = i915_mem_prios, .mem_busy_prio = i915_busy_prios, .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t), diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index a71e5dac..6f0de2ca 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -41,9 +41,9 @@ static void i915_perform_flush(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[0]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[0]; + struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t flush_flags = 0; uint32_t flush_sequence = 0; uint32_t i_status; @@ -111,7 +111,7 @@ static void i915_perform_flush(struct drm_device * dev) void i915_poke_flush(struct drm_device * dev, uint32_t class) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; write_lock_irqsave(&fm->lock, flags); @@ -137,7 +137,7 @@ int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t f void i915_fence_handler(struct drm_device * dev) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; write_lock(&fm->lock); i915_perform_flush(dev); diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index e452611d..0461b3c7 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -32,12 +32,12 @@ #include "via_drm.h" #include "via_drv.h" -drm_ttm_backend_t 
*via_create_ttm_backend_entry(struct drm_device * dev) +struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } -int via_fence_types(drm_buffer_object_t *bo, uint32_t * type) +int via_fence_types(struct drm_buffer_object *bo, uint32_t * type) { *type = 3; return 0; @@ -82,7 +82,7 @@ static int via_vram_info(struct drm_device *dev, } int via_init_mem_type(struct drm_device * dev, uint32_t type, - drm_mem_type_manager_t * man) + struct drm_mem_type_manager * man) { switch (type) { case DRM_BO_MEM_LOCAL: @@ -143,7 +143,7 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type, return 0; } -uint32_t via_evict_mask(drm_buffer_object_t *bo) +uint32_t via_evict_mask(struct drm_buffer_object *bo) { switch (bo->mem.mem_type) { case DRM_BO_MEM_LOCAL: diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index ce4366d2..a8db3d12 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -42,7 +42,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_class_manager_t *fc = &dev->fm.class[class]; + struct drm_fence_class_manager *fc = &dev->fm.class[class]; uint32_t pending_flush_types = 0; uint32_t signaled_flush_types = 0; uint32_t status; @@ -155,7 +155,7 @@ int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t fl void via_poke_flush(struct drm_device * dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; uint32_t pending_flush; @@ -202,9 +202,9 @@ void via_fence_timer(unsigned long data) { struct drm_device *dev = (struct drm_device *) data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; uint32_t pending_flush; - 
drm_fence_class_manager_t *fc = &dev->fm.class[0]; + struct drm_fence_class_manager *fc = &dev->fm.class[0]; if (!dev_priv) return; -- cgit v1.2.3 From 24311d5d82b61a4729b15355088dd9c2898d1089 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:42:11 +1000 Subject: drm: remove drm_buf_t --- linux-core/drmP.h | 6 +++--- linux-core/drm_bufs.c | 16 ++++++++-------- linux-core/drm_dma.c | 2 +- linux-core/i810_dma.c | 26 +++++++++++++------------- linux-core/i810_drv.h | 2 +- 5 files changed, 26 insertions(+), 26 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 142a04a1..9a79b0df 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -311,7 +311,7 @@ struct drm_vma_entry { /** * DMA buffer. */ -typedef struct drm_buf { +struct drm_buf { int idx; /**< Index into master buflist */ int total; /**< Buffer size */ int order; /**< log-base-2(total) */ @@ -337,7 +337,7 @@ typedef struct drm_buf { int dev_priv_size; /**< Size of buffer private storage */ void *dev_private; /**< Per-buffer private storage */ -} drm_buf_t; +}; /** bufs is one longer than it has to be */ struct drm_waitlist { @@ -1051,7 +1051,7 @@ extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, /* DMA support (drm_dma.h) */ extern int drm_dma_setup(struct drm_device *dev); extern void drm_dma_takedown(struct drm_device *dev); -extern void drm_free_buffer(struct drm_device *dev, drm_buf_t * buf); +extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp); /* IRQ support (drm_irq.h) */ diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index f766597b..c1e23b5c 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -574,7 +574,7 @@ int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request) int total; int byte_count; int i, valid; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if 
(!dma) return -EINVAL; @@ -738,14 +738,14 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request) int page_order; struct drm_buf_entry *entry; drm_dma_handle_t *dmah; - drm_buf_t *buf; + struct drm_buf *buf; int alignment; unsigned long offset; int i; int byte_count; int page_count; unsigned long *temp_pagelist; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL; @@ -958,7 +958,7 @@ static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) { struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; - drm_buf_t *buf; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -969,7 +969,7 @@ static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) int total; int byte_count; int i; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; @@ -1120,7 +1120,7 @@ int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) { struct drm_device_dma *dma = dev->dma; struct drm_buf_entry *entry; - drm_buf_t *buf; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -1131,7 +1131,7 @@ int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) int total; int byte_count; int i; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) return -EINVAL; @@ -1492,7 +1492,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, struct drm_buf_free request; int i; int idx; - drm_buf_t *buf; + struct drm_buf *buf; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index 6990f8d4..d2a88d52 100644 --- a/linux-core/drm_dma.c +++ b/linux-core/drm_dma.c @@ -129,7 +129,7 @@ void drm_dma_takedown(struct drm_device * dev) * * Resets the fields of \p buf. 
*/ -void drm_free_buffer(struct drm_device * dev, drm_buf_t * buf) +void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf) { if (!buf) return; diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 31dc1c86..1e6d8cd3 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -64,7 +64,7 @@ static inline void i810_print_status_page(struct drm_device * dev) } } -static drm_buf_t *i810_freelist_get(struct drm_device * dev) +static struct drm_buf *i810_freelist_get(struct drm_device * dev) { struct drm_device_dma *dma = dev->dma; int i; @@ -73,7 +73,7 @@ static drm_buf_t *i810_freelist_get(struct drm_device * dev) /* Linear search might not be the best solution */ for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; /* In use is already a pointer */ used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, @@ -89,7 +89,7 @@ static drm_buf_t *i810_freelist_get(struct drm_device * dev) * yet, the hardware updates in use for us once its on the ring buffer. 
*/ -static int i810_freelist_put(struct drm_device * dev, drm_buf_t * buf) +static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used; @@ -109,7 +109,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) struct drm_file *priv = filp->private_data; struct drm_device *dev; drm_i810_private_t *dev_priv; - drm_buf_t *buf; + struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; lock_kernel(); @@ -139,7 +139,7 @@ static const struct file_operations i810_buffer_fops = { .fasync = drm_fasync, }; -static int i810_map_buffer(drm_buf_t * buf, struct file *filp) +static int i810_map_buffer(struct drm_buf * buf, struct file *filp) { struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->head->dev; @@ -171,7 +171,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp) return retcode; } -static int i810_unmap_buffer(drm_buf_t * buf) +static int i810_unmap_buffer(struct drm_buf * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; @@ -194,7 +194,7 @@ static int i810_unmap_buffer(drm_buf_t * buf) static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, struct file *filp) { - drm_buf_t *buf; + struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; int retcode = 0; @@ -252,7 +252,7 @@ static int i810_dma_cleanup(struct drm_device * dev) dev->dev_private = NULL; for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; if (buf_priv->kernel_virtual && buf->total) @@ -320,7 +320,7 @@ static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_ } for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; buf_priv->in_use = hw_status++; @@ -807,7 +807,7 @@ static void 
i810_dma_dispatch_swap(struct drm_device * dev) } static void i810_dma_dispatch_vertex(struct drm_device * dev, - drm_buf_t * buf, int discard, int used) + struct drm_buf * buf, int discard, int used) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_buf_priv_t *buf_priv = buf->dev_private; @@ -971,7 +971,7 @@ static int i810_flush_queue(struct drm_device * dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE, @@ -1002,7 +1002,7 @@ static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) i810_flush_queue(dev); for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; if (buf->filp == filp && buf_priv) { @@ -1161,7 +1161,7 @@ static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } -static void i810_dma_dispatch_mc(struct drm_device * dev, drm_buf_t * buf, int used, +static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used, unsigned int last_render) { drm_i810_private_t *dev_priv = dev->dev_private; diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index 06eac774..3627d774 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -88,7 +88,7 @@ typedef struct drm_i810_private { dma_addr_t dma_status_page; - drm_buf_t *mmap_buffer; + struct drm_buf *mmap_buffer; u32 front_di1, back_di1, zi1; -- cgit v1.2.3 From 191c062933bb7a6f9dabf3fd639321e1dac88c50 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:45:39 +1000 Subject: drm: remove drm_ref_t --- linux-core/drmP.h | 4 ++-- linux-core/drm_bo.c | 2 +- linux-core/drm_object.c | 8 ++++---- linux-core/drm_objects.h | 10 +++++----- 4 files changed, 12 insertions(+), 12 
deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 9a79b0df..19e9d627 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -387,11 +387,11 @@ struct drm_buf_entry { */ #define DRM_FILE_HASH_ORDER 8 -typedef enum{ +enum drm_ref_type { _DRM_REF_USE=0, _DRM_REF_TYPE1, _DRM_NO_REF_TYPES -} drm_ref_t; +}; /** File private data */ diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index a81dfbde..681d37fe 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1217,7 +1217,7 @@ static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) static void drm_buffer_user_object_unmap(struct drm_file * priv, struct drm_user_object * uo, - drm_ref_t action) + enum drm_ref_type action) { struct drm_buffer_object *bo = drm_user_object_entry(uo, struct drm_buffer_object, base); diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 00627725..3d866333 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -106,7 +106,7 @@ int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item } static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, - drm_ref_t action) + enum drm_ref_type action) { int ret = 0; @@ -125,7 +125,7 @@ static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object } int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, - drm_ref_t ref_action) + enum drm_ref_type ref_action) { int ret = 0; struct drm_ref_object *item; @@ -183,7 +183,7 @@ int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenc struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, - drm_ref_t ref_action) + enum drm_ref_type ref_action) { struct drm_hash_item *hash; int ret; @@ -219,7 +219,7 @@ void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item) int ret; struct drm_user_object 
*user_object = (struct drm_user_object *) item->hash.key; struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; - drm_ref_t unref_action; + enum drm_ref_type unref_action; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); unref_action = item->unref_action; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 441c19f2..f792dc84 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -64,9 +64,9 @@ struct drm_user_object { struct drm_file *owner; void (*ref_struct_locked) (struct drm_file * priv, struct drm_user_object * obj, - drm_ref_t ref_action); + enum drm_ref_type ref_action); void (*unref) (struct drm_file * priv, struct drm_user_object * obj, - drm_ref_t unref_action); + enum drm_ref_type unref_action); void (*remove) (struct drm_file * priv, struct drm_user_object * obj); }; @@ -81,7 +81,7 @@ struct drm_ref_object { struct drm_hash_item hash; struct list_head list; atomic_t refcount; - drm_ref_t unref_action; + enum drm_ref_type unref_action; }; /** @@ -112,7 +112,7 @@ extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object extern int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, - drm_ref_t ref_action); + enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. @@ -120,7 +120,7 @@ extern int drm_add_ref_object(struct drm_file * priv, struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, - drm_ref_t ref_action); + enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. * If "item" has been obtained by a call to drm_lookup_ref_object. 
You may not -- cgit v1.2.3 From 535e3dec8c61474be55588d2b5dc87b0301435f8 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:46:37 +1000 Subject: drm: remove internal sman typedef --- linux-core/drm_sman.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index ece80bed..118e82ae 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -38,11 +38,11 @@ #include "drm_sman.h" -typedef struct drm_owner_item { +struct drm_owner_item { struct drm_hash_item owner_hash; struct list_head sman_list; struct list_head mem_blocks; -} drm_owner_item_t; +}; void drm_sman_takedown(struct drm_sman * sman) { @@ -163,16 +163,16 @@ drm_sman_set_manager(struct drm_sman * sman, unsigned int manager, } EXPORT_SYMBOL(drm_sman_set_manager); -static drm_owner_item_t *drm_sman_get_owner_item(struct drm_sman * sman, +static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman, unsigned long owner) { int ret; struct drm_hash_item *owner_hash_item; - drm_owner_item_t *owner_item; + struct drm_owner_item *owner_item; ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item); if (!ret) { - return drm_hash_entry(owner_hash_item, drm_owner_item_t, + return drm_hash_entry(owner_hash_item, struct drm_owner_item, owner_hash); } @@ -200,7 +200,7 @@ struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int man { void *tmp; struct drm_sman_mm *sman_mm; - drm_owner_item_t *owner_item; + struct drm_owner_item *owner_item; struct drm_memblock_item *memblock; BUG_ON(manager >= sman->num_managers); @@ -272,7 +272,7 @@ int drm_sman_free_key(struct drm_sman *sman, unsigned int key) EXPORT_SYMBOL(drm_sman_free_key); static void drm_sman_remove_owner(struct drm_sman *sman, - drm_owner_item_t *owner_item) + struct drm_owner_item *owner_item) { list_del(&owner_item->sman_list); drm_ht_remove_item(&sman->owner_hash_tab, 
&owner_item->owner_hash); @@ -283,13 +283,13 @@ int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) { struct drm_hash_item *hash_item; - drm_owner_item_t *owner_item; + struct drm_owner_item *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { return -1; } - owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash); + owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); if (owner_item->mem_blocks.next == &owner_item->mem_blocks) { drm_sman_remove_owner(sman, owner_item); return -1; @@ -301,7 +301,7 @@ int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) EXPORT_SYMBOL(drm_sman_owner_clean); static void drm_sman_do_owner_cleanup(struct drm_sman *sman, - drm_owner_item_t *owner_item) + struct drm_owner_item *owner_item) { struct drm_memblock_item *entry, *next; @@ -316,14 +316,14 @@ void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner) { struct drm_hash_item *hash_item; - drm_owner_item_t *owner_item; + struct drm_owner_item *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { return; } - owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash); + owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); drm_sman_do_owner_cleanup(sman, owner_item); } @@ -331,7 +331,7 @@ EXPORT_SYMBOL(drm_sman_owner_cleanup); void drm_sman_cleanup(struct drm_sman *sman) { - drm_owner_item_t *entry, *next; + struct drm_owner_item *entry, *next; unsigned int i; struct drm_sman_mm *sman_mm; -- cgit v1.2.3 From 0accdc1f69885c6145b6224d26ccd72002f2a72e Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:50:04 +1000 Subject: drm: fixup compat wrappers --- linux-core/drm_ioc32.c | 82 +++++++++++++++++++++++++------------------------- 1 file changed, 41 insertions(+), 41 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c index bbab3ea2..b1162785 100644 --- 
a/linux-core/drm_ioc32.c +++ b/linux-core/drm_ioc32.c @@ -82,7 +82,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd, unsigned long arg) { drm_version32_t v32; - drm_version_t __user *version; + struct drm_version __user *version; int err; if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) @@ -129,7 +129,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, unsigned long arg) { drm_unique32_t uq32; - drm_unique_t __user *u; + struct drm_unique __user *u; int err; if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) @@ -159,7 +159,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, unsigned long arg) { drm_unique32_t uq32; - drm_unique_t __user *u; + struct drm_unique __user *u; if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) return -EFAULT; @@ -179,8 +179,8 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, typedef struct drm_map32 { u32 offset; /**< Requested physical address (0 for SAREA)*/ u32 size; /**< Requested physical size (bytes) */ - drm_map_type_t type; /**< Type of memory to map */ - drm_map_flags_t flags; /**< Flags */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ u32 handle; /**< User-space: "Handle" to pass to mmap() */ int mtrr; /**< MTRR slot used */ } drm_map32_t; @@ -190,7 +190,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - drm_map_t __user *map; + struct drm_map __user *map; int idx, err; void *handle; @@ -228,7 +228,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - drm_map_t __user *map; + struct drm_map __user *map; int err; void *handle; @@ -270,7 +270,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd, unsigned long arg) { drm_map32_t __user *argp = (void __user *)arg; - drm_map_t 
__user *map; + struct drm_map __user *map; u32 handle; if (get_user(handle, &argp->handle)) @@ -300,7 +300,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, { drm_client32_t c32; drm_client32_t __user *argp = (void __user *)arg; - drm_client_t __user *client; + struct drm_client __user *client; int idx, err; if (get_user(idx, &argp->idx)) @@ -333,7 +333,7 @@ typedef struct drm_stats32 { u32 count; struct { u32 value; - drm_stat_type_t type; + enum drm_stat_type type; } data[15]; } drm_stats32_t; @@ -342,7 +342,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, { drm_stats32_t s32; drm_stats32_t __user *argp = (void __user *)arg; - drm_stats_t __user *stats; + struct drm_stats __user *stats; int i, err; stats = compat_alloc_user_space(sizeof(*stats)); @@ -379,7 +379,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc_t __user *buf; + struct drm_buf_desc __user *buf; int err; unsigned long agp_start; @@ -411,7 +411,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd, { drm_buf_desc32_t b32; drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc_t __user *buf; + struct drm_buf_desc __user *buf; if (copy_from_user(&b32, argp, sizeof(b32))) return -EFAULT; @@ -440,8 +440,8 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, drm_buf_info32_t req32; drm_buf_info32_t __user *argp = (void __user *)arg; drm_buf_desc32_t __user *to; - drm_buf_info_t __user *request; - drm_buf_desc_t __user *list; + struct drm_buf_info __user *request; + struct drm_buf_desc __user *list; size_t nbytes; int i, err; int count, actual; @@ -457,11 +457,11 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t))) return -EFAULT; - nbytes = sizeof(*request) + count * sizeof(drm_buf_desc_t); + nbytes = sizeof(*request) + 
count * sizeof(struct drm_buf_desc); request = compat_alloc_user_space(nbytes); if (!access_ok(VERIFY_WRITE, request, nbytes)) return -EFAULT; - list = (drm_buf_desc_t *) (request + 1); + list = (struct drm_buf_desc *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -477,7 +477,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, if (count >= actual) for (i = 0; i < actual; ++i) if (__copy_in_user(&to[i], &list[i], - offsetof(drm_buf_desc_t, flags))) + offsetof(struct drm_buf_desc, flags))) return -EFAULT; if (__put_user(actual, &argp->count)) @@ -505,8 +505,8 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, drm_buf_map32_t __user *argp = (void __user *)arg; drm_buf_map32_t req32; drm_buf_pub32_t __user *list32; - drm_buf_map_t __user *request; - drm_buf_pub_t __user *list; + struct drm_buf_map __user *request; + struct drm_buf_pub __user *list; int i, err; int count, actual; size_t nbytes; @@ -519,11 +519,11 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, if (count < 0) return -EINVAL; - nbytes = sizeof(*request) + count * sizeof(drm_buf_pub_t); + nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub); request = compat_alloc_user_space(nbytes); if (!access_ok(VERIFY_WRITE, request, nbytes)) return -EFAULT; - list = (drm_buf_pub_t *) (request + 1); + list = (struct drm_buf_pub *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -539,7 +539,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, if (count >= actual) for (i = 0; i < actual; ++i) if (__copy_in_user(&list32[i], &list[i], - offsetof(drm_buf_pub_t, address)) + offsetof(struct drm_buf_pub, address)) || __get_user(addr, &list[i].address) || __put_user((unsigned long)addr, &list32[i].address)) @@ -562,7 +562,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_free32_t req32; - 
drm_buf_free_t __user *request; + struct drm_buf_free __user *request; drm_buf_free32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -589,7 +589,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, unsigned long arg) { drm_ctx_priv_map32_t req32; - drm_ctx_priv_map_t __user *request; + struct drm_ctx_priv_map __user *request; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -610,7 +610,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, static int compat_drm_getsareactx(struct file *file, unsigned int cmd, unsigned long arg) { - drm_ctx_priv_map_t __user *request; + struct drm_ctx_priv_map __user *request; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; int err; unsigned int ctx_id; @@ -648,7 +648,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, { drm_ctx_res32_t __user *argp = (void __user *)arg; drm_ctx_res32_t res32; - drm_ctx_res_t __user *res; + struct drm_ctx_res __user *res; int err; if (copy_from_user(&res32, argp, sizeof(res32))) @@ -658,7 +658,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, if (!access_ok(VERIFY_WRITE, res, sizeof(*res))) return -EFAULT; if (__put_user(res32.count, &res->count) - || __put_user((drm_ctx_t __user *)(unsigned long)res32.contexts, + || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts, &res->contexts)) return -EFAULT; @@ -679,7 +679,7 @@ typedef struct drm_dma32 { int send_count; /**< Number of buffers to send */ u32 send_indices; /**< List of handles to buffers */ u32 send_sizes; /**< Lengths of data to send */ - drm_dma_flags_t flags; /**< Flags */ + enum drm_dma_flags flags; /**< Flags */ int request_count; /**< Number of buffers requested */ int request_size; /**< Desired size for buffers */ u32 request_indices; /**< Buffer information */ @@ -692,7 +692,7 @@ static int compat_drm_dma(struct file *file, 
unsigned int cmd, { drm_dma32_t d32; drm_dma32_t __user *argp = (void __user *)arg; - drm_dma_t __user *d; + struct drm_dma __user *d; int err; if (copy_from_user(&d32, argp, sizeof(d32))) @@ -740,7 +740,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd, { drm_agp_mode32_t __user *argp = (void __user *)arg; drm_agp_mode32_t m32; - drm_agp_mode_t __user *mode; + struct drm_agp_mode __user *mode; if (get_user(m32.mode, &argp->mode)) return -EFAULT; @@ -772,7 +772,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd, { drm_agp_info32_t __user *argp = (void __user *)arg; drm_agp_info32_t i32; - drm_agp_info_t __user *info; + struct drm_agp_info __user *info; int err; info = compat_alloc_user_space(sizeof(*info)); @@ -813,7 +813,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, { drm_agp_buffer32_t __user *argp = (void __user *)arg; drm_agp_buffer32_t req32; - drm_agp_buffer_t __user *request; + struct drm_agp_buffer __user *request; int err; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -845,7 +845,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_buffer32_t __user *argp = (void __user *)arg; - drm_agp_buffer_t __user *request; + struct drm_agp_buffer __user *request; u32 handle; request = compat_alloc_user_space(sizeof(*request)); @@ -868,7 +868,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd, { drm_agp_binding32_t __user *argp = (void __user *)arg; drm_agp_binding32_t req32; - drm_agp_binding_t __user *request; + struct drm_agp_binding __user *request; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; @@ -887,7 +887,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_binding32_t __user *argp = (void __user *)arg; - drm_agp_binding_t __user *request; + struct drm_agp_binding __user *request; u32 handle; request = compat_alloc_user_space(sizeof(*request)); 
@@ -910,7 +910,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - drm_scatter_gather_t __user *request; + struct drm_scatter_gather __user *request; int err; unsigned long x; @@ -938,7 +938,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - drm_scatter_gather_t __user *request; + struct drm_scatter_gather __user *request; unsigned long x; request = compat_alloc_user_space(sizeof(*request)); @@ -953,13 +953,13 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, } struct drm_wait_vblank_request32 { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; u32 signal; }; struct drm_wait_vblank_reply32 { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; s32 tval_sec; s32 tval_usec; @@ -975,7 +975,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, { drm_wait_vblank32_t __user *argp = (void __user *)arg; drm_wait_vblank32_t req32; - drm_wait_vblank_t __user *request; + union drm_wait_vblank __user *request; int err; if (copy_from_user(&req32, argp, sizeof(req32))) -- cgit v1.2.3 From 23631fca09a9769d2391ebdec1f186cf33bf984e Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 16 Jul 2007 13:52:21 +1000 Subject: drm: fixup old kernel compat code --- linux-core/drm_compat.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 38ca497f..9a6da7e9 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -204,8 +204,8 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; - drm_ttm_t *ttm; - drm_device_t 
*dev; + struct drm_ttm *ttm; + struct drm_device *dev; unsigned long pfn; int err; unsigned long bus_base; @@ -262,7 +262,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { - drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); @@ -354,8 +354,8 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page; - drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_ttm *ttm; + struct drm_device *dev; mutex_lock(&bo->mutex); @@ -406,7 +406,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma) BUG_ON(ret); if (bus_size) { - drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type]; unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma); ret = io_remap_pfn_range(vma, vma->vm_start, pfn, -- cgit v1.2.3 From ec67c2def9af16bf9252d6742aec815b817f135a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sun, 15 Jul 2007 17:18:15 +1000 Subject: nouveau: G8x PCIEGART Actually a NV04-NV50 ttm backend for both PCI and PCIEGART, but PCIGART support for G8X using the current mm has been hacked on top of it. 
--- linux-core/Makefile.kernel | 1 + linux-core/nouveau_sgdma.c | 318 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 319 insertions(+) create mode 100644 linux-core/nouveau_sgdma.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index be2641c8..5aa589cd 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -22,6 +22,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ + nouveau_sgdma.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c new file mode 100644 index 00000000..a65317cd --- /dev/null +++ b/linux-core/nouveau_sgdma.c @@ -0,0 +1,318 @@ +#include "drmP.h" +#include "nouveau_drv.h" + +#define NV_CTXDMA_PAGE_SHIFT 12 +#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) +#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) + +struct nouveau_sgdma_be { + struct drm_ttm_backend backend; + struct drm_device *dev; + + int pages; + int pages_populated; + dma_addr_t *pagelist; + int is_bound; + + unsigned int pte_start; +}; + +static int +nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be) +{ + return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1); +} + +static int +nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, + struct page **pages) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + int p, d, o; + + DRM_DEBUG("num_pages = %ld\n", num_pages); + + if (nvbe->pagelist) + return DRM_ERR(EINVAL); + nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT; + nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t), + DRM_MEM_PAGES); + + nvbe->pages_populated = d = 0; + for (p = 0; p < num_pages; p++) { + for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) { + nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev, + pages[p], o, + NV_CTXDMA_PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(nvbe->pagelist[d])) { + be->func->clear(be); + DRM_ERROR("pci_map_page failed\n"); + return DRM_ERR(EINVAL); + } + nvbe->pages_populated = ++d; + } + } + + return 0; +} + +static void +nouveau_sgdma_clear(struct drm_ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + int d; + + DRM_DEBUG("\n"); + + if (nvbe && nvbe->pagelist) { + if (nvbe->is_bound) + be->func->unbind(be); + + for (d = 0; d < nvbe->pages_populated; d--) { + pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], + NV_CTXDMA_PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + } + drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t), + DRM_MEM_PAGES); + } +} + +static int +nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, + int cached) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + uint64_t offset = (pg_start << PAGE_SHIFT); + uint32_t i; + + DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached); + + if (offset & NV_CTXDMA_PAGE_MASK) + return DRM_ERR(EINVAL); + nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT); + if (dev_priv->card_type < NV_50) + nvbe->pte_start += 2; /* skip ctxdma 
header */ + + for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) { + uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start]; + + if (pteval & NV_CTXDMA_PAGE_MASK) { + DRM_ERROR("Bad pteval 0x%llx\n", pteval); + return DRM_ERR(EINVAL); + } + + if (dev_priv->card_type < NV_50) { + INSTANCE_WR(gpuobj, i, pteval | 3); + } else { + INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21); + INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000); + } + } + + nvbe->is_bound = 1; + return 0; +} + +static int +nouveau_sgdma_unbind(struct drm_ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + + DRM_DEBUG("\n"); + + if (nvbe->is_bound) { + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + unsigned int pte; + + pte = nvbe->pte_start; + while (pte < (nvbe->pte_start + nvbe->pages)) { + uint64_t pteval = dev_priv->gart_info.sg_dummy_bus; + + if (dev_priv->card_type < NV_50) { + INSTANCE_WR(gpuobj, pte, pteval | 3); + } else { + INSTANCE_WR(gpuobj, (pte<<1)+0, 0x00000010); + INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000004); + } + + pte++; + } + + nvbe->is_bound = 0; + } + + return 0; +} + +static void +nouveau_sgdma_destroy(struct drm_ttm_backend *be) +{ + DRM_DEBUG("\n"); + if (be) { + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + if (nvbe) { + if (nvbe->pagelist) + be->func->clear(be); + drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM); + } + } +} + +static struct drm_ttm_backend_func nouveau_sgdma_backend = { + .needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust, + .populate = nouveau_sgdma_populate, + .clear = nouveau_sgdma_clear, + .bind = nouveau_sgdma_bind, + .unbind = nouveau_sgdma_unbind, + .destroy = nouveau_sgdma_destroy +}; + +struct drm_ttm_backend * +nouveau_sgdma_init_ttm(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_sgdma_be *nvbe; + + if (!dev_priv->gart_info.sg_ctxdma) + return 
NULL; + + nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM); + if (!nvbe) + return NULL; + + nvbe->dev = dev; + + nvbe->backend.func = &nouveau_sgdma_backend; + nvbe->backend.mem_type = DRM_BO_MEM_TT; + + return &nvbe->backend; +} + +int +nouveau_sgdma_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + uint32_t aper_size, obj_size; + int i, ret; + + if (dev_priv->card_type < NV_50) { + aper_size = (64 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; + obj_size += 8; /* ctxdma header */ + } else { + /* 1 entire VM page table */ + aper_size = (512 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; + } + + if ((ret = nouveau_gpuobj_new(dev, -1, obj_size, 16, + NVOBJ_FLAG_ALLOW_NO_REFS | + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { + DRM_ERROR("Error creating sgdma object: %d\n", ret); + return ret; + } + + if (dev_priv->card_type < NV_50) { + dev_priv->gart_info.sg_dummy_page = + alloc_page(GFP_KERNEL|__GFP_DMA32); + SetPageLocked(dev_priv->gart_info.sg_dummy_page); + dev_priv->gart_info.sg_dummy_bus = + pci_map_page(dev->pdev, + dev_priv->gart_info.sg_dummy_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + + /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and + * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE + * on those cards? 
*/ + INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | + (1 << 12) /* PT present */ | + (0 << 13) /* PT *not* linear */ | + (NV_DMA_ACCESS_RW << 14) | + (NV_DMA_TARGET_PCI << 16)); + INSTANCE_WR(gpuobj, 1, aper_size - 1); + for (i=2; i<2+(aper_size>>12); i++) { + INSTANCE_WR(gpuobj, i, + dev_priv->gart_info.sg_dummy_bus | 3); + } + } else { + for (i=0; igart_info.type = NOUVEAU_GART_SGDMA; + dev_priv->gart_info.aper_base = 0; + dev_priv->gart_info.aper_size = aper_size; + dev_priv->gart_info.sg_ctxdma = gpuobj; + return 0; +} + +void +nouveau_sgdma_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->gart_info.sg_dummy_page) { + pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, + NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + unlock_page(dev_priv->gart_info.sg_dummy_page); + __free_page(dev_priv->gart_info.sg_dummy_page); + dev_priv->gart_info.sg_dummy_page = NULL; + dev_priv->gart_info.sg_dummy_bus = 0; + } + + nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); +} + +int +nouveau_sgdma_nottm_hack_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_ttm_backend *be; + struct drm_scatter_gather sgreq; + int ret; + + dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); + if (!dev_priv->gart_info.sg_be) + return DRM_ERR(ENOMEM); + be = dev_priv->gart_info.sg_be; + + /* Hack the aperture size down to the amount of system memory + * we're going to bind into it. 
+ */ + if (dev_priv->gart_info.aper_size > 32*1024*1024) + dev_priv->gart_info.aper_size = 32*1024*1024; + + sgreq.size = dev_priv->gart_info.aper_size; + if ((ret = drm_sg_alloc(dev, &sgreq))) { + DRM_ERROR("drm_sg_alloc failed: %d\n", ret); + return ret; + } + dev_priv->gart_info.sg_handle = sgreq.handle; + + if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) { + DRM_ERROR("failed populate: %d\n", ret); + return ret; + } + + if ((ret = be->func->bind(be, 0, 0))) { + DRM_ERROR("failed bind: %d\n", ret); + return ret; + } + + return 0; +} + +void +nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev) +{ +} + -- cgit v1.2.3 From 4575d5b8f18fef8cd19e7884bf8dab5e8f71ec9e Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 20:56:11 -0700 Subject: Massive log message clean up in xgi_submit_cmdlist. --- linux-core/xgi_cmdlist.c | 99 +++++++----------------------------------------- 1 file changed, 14 insertions(+), 85 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 7be0ac48..61373469 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -64,131 +64,61 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) { - unsigned int beginPort; - /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ + const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo); - /* Jong 05/25/2006 */ - /* return; */ - - beginPort = getCurBatchBeginPort(pCmdInfo); - XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); - - /* Jong 05/25/2006 */ - /* return; */ + XGI_INFO("After getCurBatchBeginPort()\n"); if (s_cmdring._lastBatchStartAddr == 0) { - unsigned int portOffset; + const unsigned int portOffset = BASE_3D_ENG + beginPort; /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ - /* Jong 06132006; BASE_3D_ENG=0x2800 */ - /* 
beginPort: 2D: 0x30 */ - portOffset = BASE_3D_ENG + beginPort; - // Enable PCI Trigger Mode - XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); - - /* Jong 05/25/2006 */ - /* return; */ - - /* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */ - XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", - M2REG_AUTO_LINK_SETTING_ADDRESS); - XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", - M2REG_CLEAR_COUNTERS_MASK); - XGI_INFO - ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); - XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", - M2REG_PCI_TRIGGER_MODE_MASK); + XGI_INFO("Enable PCI Trigger Mode \n"); + /* Jong 06/14/2006; 0x400001a */ - XGI_INFO - ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | - M2REG_CLEAR_COUNTERS_MASK | 0x08 | - M2REG_PCI_TRIGGER_MODE_MASK); dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | M2REG_CLEAR_COUNTERS_MASK | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); - /* Jong 05/25/2006 */ - XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); - /* return; *//* OK */ - /* Jong 06/14/2006; 0x400000a */ - XGI_INFO - ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", - (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | - M2REG_PCI_TRIGGER_MODE_MASK); dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); // Send PCI begin command - XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); - /* return; */ + XGI_INFO("Send PCI begin command \n"); - XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", - portOffset); - XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); + XGI_INFO("portOffset=%d, beginPort=%d\n", + portOffset, beginPort); /* beginPort = 48; */ /* 0xc100000 */ 
dwWriteReg(portOffset, (beginPort << 22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); - XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort << 22)); - XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); - XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", - pCmdInfo->_curDebugID); - XGI_INFO - ("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n", - (beginPort << 22) + (BEGIN_VALID_MASK) + - pCmdInfo->_curDebugID); - XGI_INFO - ("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); - /* return; *//* OK */ + + XGI_INFO("Send PCI begin command- After\n"); /* 0x80000024 */ dwWriteReg(portOffset + 4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", - BEGIN_LINK_ENABLE_MASK); - XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", - pCmdInfo->_firstSize); - XGI_INFO - ("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", - BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); /* 0x1010000 */ dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4)); - XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n", - pCmdInfo->_firstBeginAddr); - XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", - (pCmdInfo->_firstBeginAddr >> 4)); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); - - /* Jong 06/13/2006 */ - xgi_dump_register(info); /* Jong 06/12/2006; system hang; marked for test */ dwWriteReg(portOffset + 12, 0); - XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ } else { u32 *lastBatchVirtAddr; - XGI_INFO - ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); - - /* Jong 05/25/2006 */ - /* return; */ + XGI_INFO("s_cmdring._lastBatchStartAddr != 0\n"); if (pCmdInfo->_firstBeginType == BTYPE_3D) { addFlush2D(info); @@ -215,14 +145,13 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * 
pCmdInfo) /* Jong 06/12/2006; system hang; marked for test */ triggerHWCommandList(info, pCmdInfo->_beginCount); + } else { + XGI_ERROR("lastBatchVirtAddr is NULL\n"); } - - XGI_INFO - ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); } s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; - XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); + XGI_INFO("End\n"); } /* -- cgit v1.2.3 From 658ff2daf3d2a080da2d859f522a627aef841637 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 20:58:43 -0700 Subject: Eliminate several useless ioctls and associated cruft. The ioctlss XGI_ESC_DEVICE_INFO, XGI_ESC_MEM_COLLECT, XGI_ESC_PCIE_CHECK, XGI_ESC_GET_SCREEN_INFO, XGI_ESC_PUT_SCREEN_INFO, XGI_ESC_MMIO_INFO, and XGI_ESC_SAREA_INFO, are completely unnecessary. The will be doubly useless when the driver is converted to the DRM infrastructure. --- linux-core/xgi_drv.c | 28 ---------------------- linux-core/xgi_drv.h | 3 --- linux-core/xgi_misc.c | 66 --------------------------------------------------- linux-core/xgi_misc.h | 5 ---- linux-core/xgi_pcie.c | 29 ---------------------- 5 files changed, 131 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index b3425c75..bd39dfdc 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -877,10 +877,6 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, arg_size); switch (_IOC_NR(cmd)) { - case XGI_ESC_DEVICE_INFO: - XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); - xgi_get_device_info(info, (struct xgi_chip_info *)arg_copy); - break; case XGI_ESC_POST_VBIOS: XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); break; @@ -892,10 +888,6 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, XGI_INFO("Jong-xgi_ioctl_fb_free \n"); xgi_fb_free(info, *(unsigned long *)arg_copy); break; - case XGI_ESC_MEM_COLLECT: - XGI_INFO("Jong-xgi_ioctl_mem_collect \n"); - xgi_mem_collect(info, (unsigned int *)arg_copy); - break; case XGI_ESC_PCIE_ALLOC: 
XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); xgi_pcie_alloc(info, alloc, 0); @@ -905,30 +897,10 @@ int xgi_kern_ioctl(struct inode *inode, struct file *filp, *((unsigned long *)arg_copy)); xgi_pcie_free(info, *((unsigned long *)arg_copy)); break; - case XGI_ESC_PCIE_CHECK: - XGI_INFO("Jong-xgi_pcie_heap_check \n"); - xgi_pcie_heap_check(); - break; - case XGI_ESC_GET_SCREEN_INFO: - XGI_INFO("Jong-xgi_get_screen_info \n"); - xgi_get_screen_info(info, (struct xgi_screen_info *)arg_copy); - break; - case XGI_ESC_PUT_SCREEN_INFO: - XGI_INFO("Jong-xgi_put_screen_info \n"); - xgi_put_screen_info(info, (struct xgi_screen_info *)arg_copy); - break; - case XGI_ESC_MMIO_INFO: - XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); - xgi_get_mmio_info(info, (struct xgi_mmio_info *)arg_copy); - break; case XGI_ESC_GE_RESET: XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); xgi_ge_reset(info); break; - case XGI_ESC_SAREA_INFO: - XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); - xgi_sarea_info(info, (struct xgi_sarea_info *)arg_copy); - break; case XGI_ESC_DUMP_REGISTER: XGI_INFO("Jong-xgi_ioctl_dump_register \n"); xgi_dump_register(info); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 983ed0a9..382bb7a6 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -110,8 +110,6 @@ struct xgi_info { struct xgi_aperture mmio; struct xgi_aperture fb; struct xgi_aperture pcie; - struct xgi_screen_info scrn_info; - struct xgi_sarea_info sarea_info; /* look up table parameters */ u32 *lut_base; @@ -207,7 +205,6 @@ extern void xgi_pcie_heap_cleanup(struct xgi_info * info); extern void xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, pid_t pid); extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset); -extern void xgi_pcie_heap_check(void); extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, unsigned long address); extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address); diff --git a/linux-core/xgi_misc.c 
b/linux-core/xgi_misc.c index 2d310a2f..bb2813ca 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -31,78 +31,12 @@ #include "xgi_regs.h" #include "xgi_pcie.h" -void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req) -{ - req->device_id = info->dev->device; - req->device_name[0] = 'x'; - req->device_name[1] = 'g'; - req->device_name[2] = '4'; - req->device_name[3] = '7'; - req->vendor_id = info->dev->vendor; - req->curr_display_mode = 0; - req->fb_size = info->fb.size; - req->sarea_bus_addr = info->sarea_info.bus_addr; - req->sarea_size = info->sarea_info.size; -} - -void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req) -{ - req->mmio_base = info->mmio.base; - req->size = info->mmio.size; -} - -void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req) -{ - info->scrn_info.scrn_start = req->scrn_start; - info->scrn_info.scrn_xres = req->scrn_xres; - info->scrn_info.scrn_yres = req->scrn_yres; - info->scrn_info.scrn_bpp = req->scrn_bpp; - info->scrn_info.scrn_pitch = req->scrn_pitch; - - XGI_INFO("info->scrn_info.scrn_start: 0x%lx" - "info->scrn_info.scrn_xres: 0x%lx" - "info->scrn_info.scrn_yres: 0x%lx" - "info->scrn_info.scrn_bpp: 0x%lx" - "info->scrn_info.scrn_pitch: 0x%lx\n", - info->scrn_info.scrn_start, - info->scrn_info.scrn_xres, - info->scrn_info.scrn_yres, - info->scrn_info.scrn_bpp, info->scrn_info.scrn_pitch); -} - -void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req) -{ - req->scrn_start = info->scrn_info.scrn_start; - req->scrn_xres = info->scrn_info.scrn_xres; - req->scrn_yres = info->scrn_info.scrn_yres; - req->scrn_bpp = info->scrn_info.scrn_bpp; - req->scrn_pitch = info->scrn_info.scrn_pitch; - - XGI_INFO("req->scrn_start: 0x%lx" - "req->scrn_xres: 0x%lx" - "req->scrn_yres: 0x%lx" - "req->scrn_bpp: 0x%lx" - "req->scrn_pitch: 0x%lx\n", - req->scrn_start, - req->scrn_xres, - req->scrn_yres, req->scrn_bpp, req->scrn_pitch); -} - void 
xgi_ge_reset(struct xgi_info * info) { xgi_disable_ge(info); xgi_enable_ge(info); } -void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req) -{ - info->sarea_info.bus_addr = req->bus_addr; - info->sarea_info.size = req->size; - XGI_INFO("info->sarea_info.bus_addr: 0x%lx" - "info->sarea_info.size: 0x%lx\n", - info->sarea_info.bus_addr, info->sarea_info.size); -} - /* * irq functions */ diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 85cfbf2b..9c0591b2 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -30,12 +30,7 @@ #define _XGI_MISC_H_ extern void xgi_dump_register(struct xgi_info * info); -extern void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req); -extern void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req); -extern void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req); -extern void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req); extern void xgi_ge_reset(struct xgi_info * info); -extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req); extern void xgi_restore_registers(struct xgi_info * info); extern bool xgi_ge_irq_handler(struct xgi_info * info); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 70459b2c..0d641ab8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -344,35 +344,6 @@ int xgi_pcie_heap_init(struct xgi_info * info) return 0; } -void xgi_pcie_heap_check(void) -{ -#ifdef XGI_DEBUG - struct xgi_pcie_block *block; - unsigned int ownerIndex; - static const char *const ownerStr[6] = - { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" }; - - if (!xgi_pcie_heap) { - return; - } - - XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); - list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { - if (block->owner == PCIE_2D) - ownerIndex = 0; - else if (block->owner > PCIE_3D_TEXTURE - || block->owner < PCIE_2D - || block->owner < PCIE_3D) - 
ownerIndex = 5; - else - ownerIndex = block->owner - PCIE_3D + 1; - - XGI_INFO("Allocated by %s, block offset: 0x%lx, size: 0x%lx \n", - ownerStr[ownerIndex], block->offset, block->size); - } -#endif -} - void xgi_pcie_heap_cleanup(struct xgi_info * info) { struct list_head *free_list; -- cgit v1.2.3 From 5b08ab258f3e541334d2b64d38e15e1431080199 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 21:12:30 -0700 Subject: Clean ups (primarilly log messages) in xgi_test_rwinkernel. --- linux-core/xgi_pcie.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 0d641ab8..cfc9febc 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -919,20 +919,21 @@ void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) */ void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address) { - unsigned long *virtaddr = 0; + u32 *virtaddr = 0; + + XGI_INFO("input GE HW addr is 0x%x\n", address); + if (address == 0) { - XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n"); return; } - virtaddr = (unsigned long *)xgi_find_pcie_virt(info, address); + virtaddr = (u32 *)xgi_find_pcie_virt(info, address); + + XGI_INFO("convert to CPU virt addr 0x%p\n", virtaddr); - XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address); - XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr); - XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr); if (virtaddr != NULL) { + XGI_INFO("original [virtaddr] = 0x%x\n", *virtaddr); *virtaddr = 0x00f00fff; + XGI_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); } - - XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr); } -- cgit v1.2.3 From bcba7ba981a88e27ad4d7e8ebcdbed7097cf1488 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 21:15:58 -0700 Subject: Log message clean up in WriteRegDWord. Remove unused inline functions. 
--- linux-core/xgi_regs.h | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 0e54e7d8..bc3e2a1e 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -153,16 +153,10 @@ static inline void writeAttr(struct xgi_info * info, u8 index, u8 value) */ static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data) { - /* Jong 05/25/2006 */ - XGI_INFO("Jong-WriteRegDWord()-Begin \n"); - XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", - info->mmio.vbase); - XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr); - XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); - /* return; */ + XGI_INFO("mmio vbase = 0x%p, addr = 0x%x, data = 0x%x\n", + info->mmio->vbase, addr, data); *(volatile u32 *)(info->mmio.vbase + addr) = (data); - XGI_INFO("Jong-WriteRegDWord()-End \n"); } static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data) @@ -262,18 +256,6 @@ extern void DisableProtect(); #define wReadReg(addr) ReadRegWord(info, addr) #define bReadReg(addr) ReadRegByte(info, addr) -static inline void xgi_protect_all(struct xgi_info * info) -{ - OUTB(0x3C4, 0x11); - OUTB(0x3C5, 0x92); -} - -static inline void xgi_unprotect_all(struct xgi_info * info) -{ - OUTB(0x3C4, 0x11); - OUTB(0x3C5, 0x92); -} - static inline void xgi_enable_mmio(struct xgi_info * info) { u8 protect = 0; -- cgit v1.2.3 From 7f98815d0027b1d4bd07b08e540106d5e994bcc5 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 16 Jul 2007 22:15:01 -0700 Subject: Make drm_sg_free callable in-kernel. 
--- linux-core/drmP.h | 5 +++-- linux-core/drm_scatter.c | 29 +++++++++++++++++++---------- 2 files changed, 22 insertions(+), 12 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 2bbc6200..ebb530bc 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1135,8 +1135,9 @@ extern void drm_sg_cleanup(drm_sg_mem_t * entry); extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); extern int drm_sg_alloc(drm_device_t *dev, drm_scatter_gather_t * request); -extern int drm_sg_free(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_sg_free(struct drm_device *dev, unsigned long handle); +extern int drm_sg_free_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); /* ATI PCIGART support (ati_pcigart.h) */ extern int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info); diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index c0d6db24..5581dc0b 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -203,6 +203,7 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, if (copy_to_user(argp, &request, sizeof(request))) { drm_sg_cleanup(priv->head->dev->sg); + priv->head->dev->sg = NULL; return -EFAULT; } @@ -211,26 +212,18 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, } -int drm_sg_free(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_sg_free(struct drm_device *dev, unsigned long handle) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_scatter_gather_t request; drm_sg_mem_t *entry; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; - if (copy_from_user(&request, - (drm_scatter_gather_t __user *) arg, - sizeof(request))) - return -EFAULT; entry = dev->sg; dev->sg = NULL; - if (!entry || entry->handle != request.handle) + if (!entry || 
entry->handle != handle) return -EINVAL; DRM_DEBUG("sg free virtual = %p\n", entry->virtual); @@ -239,3 +232,19 @@ int drm_sg_free(struct inode *inode, struct file *filp, return 0; } + +EXPORT_SYMBOL(drm_sg_free); + +int drm_sg_free_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_scatter_gather __user *argp = (void __user *)arg; + struct drm_scatter_gather request; + + if (copy_from_user(&request, argp, sizeof(request))) + return -EFAULT; + + return drm_sg_free(dev, request.handle); +} -- cgit v1.2.3 From bff698d0edef90272247dfb90e454f7b98fd82dd Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 17 Jul 2007 09:59:26 +1000 Subject: drm_context: fix braino --- linux-core/drm_context.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 95d28898..a0b1a7ec 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -302,7 +302,7 @@ int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { struct drm_ctx_res res; - struct drm_ctx __user *argp = (void __user *)arg; + struct drm_ctx_res __user *argp = (void __user *)arg; struct drm_ctx ctx; int i; -- cgit v1.2.3 From 6ad1df217647d112a21c2e004d4e3d74c7bb0e0e Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 18 Jul 2007 09:42:06 +1000 Subject: drm: remove drm_u64_t, replace with uint64_t everwhere This might break something, stdint.h inclusion in drm.h maybe required but I'm not sure yet what platforms have it what ones don't. 
--- linux-core/drmP.h | 8 ++++---- linux-core/drm_bo.c | 2 +- linux-core/drm_memory.c | 12 ++++++------ linux-core/drm_proc.c | 6 +++--- 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 19e9d627..c5dfe6bf 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -551,7 +551,7 @@ struct drm_map_list { struct list_head head; /**< list head */ struct drm_hash_item hash; struct drm_map *map; /**< mapping */ - drm_u64_t user_token; + uint64_t user_token; struct drm_mm_node *file_offset_node; }; @@ -931,9 +931,9 @@ extern int drm_unbind_agp(DRM_AGP_MEM * handle); extern void drm_free_memctl(size_t size); extern int drm_alloc_memctl(size_t size); -extern void drm_query_memctl(drm_u64_t *cur_used, - drm_u64_t *low_threshold, - drm_u64_t *high_threshold); +extern void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold); extern void drm_init_memctl(size_t low_threshold, size_t high_threshold, size_t unit_size); diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 681d37fe..374be04e 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -2629,7 +2629,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) return -ENOMEM; } - list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; + list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT; return 0; } diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index 454c33e8..f68a3a3e 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -38,9 +38,9 @@ static struct { spinlock_t lock; - drm_u64_t cur_used; - drm_u64_t low_threshold; - drm_u64_t high_threshold; + uint64_t cur_used; + uint64_t low_threshold; + uint64_t high_threshold; } drm_memctl = { .lock = SPIN_LOCK_UNLOCKED }; @@ -82,9 +82,9 @@ void drm_free_memctl(size_t size) } EXPORT_SYMBOL(drm_free_memctl); -void drm_query_memctl(drm_u64_t *cur_used, - drm_u64_t *low_threshold, - drm_u64_t 
*high_threshold) +void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold) { spin_lock(&drm_memctl.lock); *cur_used = drm_memctl.cur_used; diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index 3f9cb028..08bf99d6 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -436,9 +436,9 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request, int len = 0; struct drm_buffer_manager *bm = &dev->bm; struct drm_fence_manager *fm = &dev->fm; - drm_u64_t used_mem; - drm_u64_t low_mem; - drm_u64_t high_mem; + uint64_t used_mem; + uint64_t low_mem; + uint64_t high_mem; if (offset > DRM_PROC_LIMIT) { -- cgit v1.2.3 From 3a71e87742ce8686c2b3c85ebbc8fb7a72b4f6e0 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 18 Jul 2007 09:46:16 +1000 Subject: drm: idr stuff is upstream for 2.6.23 --- linux-core/drm_compat.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 024059ac..0b00ba47 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -306,8 +306,10 @@ extern int drm_bo_map_bound(struct vm_area_struct *vma); #endif -/* fixme when functions are upstreamed */ +/* fixme when functions are upstreamed - upstreamed for 2.6.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) #define DRM_IDR_COMPAT_FN +#endif #ifdef DRM_IDR_COMPAT_FN int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data); -- cgit v1.2.3 From a64b5d8d3763639fbb4098500ad5c86fb8590aa7 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 18 Jul 2007 15:49:45 +1000 Subject: fix some missing whitespace/tab --- linux-core/drm_fops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 98e581fe..d542d4e3 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -335,7 +335,7 @@ 
EXPORT_SYMBOL(drm_fasync); static void drm_object_release(struct file *filp) { - struct drm_file *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct list_head *head; struct drm_user_object *user_object; struct drm_ref_object *ref_object; -- cgit v1.2.3 From 1ff858fe3a6b632c879a9f99a67227db7df70b62 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 18 Jul 2007 10:40:03 +0200 Subject: Fix via dmablit when blit queue is full. Fix by Simon Farnsworth, Bugzilla Bug #11542 http://bugs.freedesktop.org/show_bug.cgi?id=11542 --- linux-core/via_dmablit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 5108c867..6422609c 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -568,7 +568,7 @@ via_init_dmablit(struct drm_device *dev) blitq->head = 0; blitq->cur = 0; blitq->serviced = 0; - blitq->num_free = VIA_NUM_BLIT_SLOTS; + blitq->num_free = VIA_NUM_BLIT_SLOTS - 1; blitq->num_outstanding = 0; blitq->is_active = 0; blitq->aborting = 0; -- cgit v1.2.3 From 33a50412c21229610dbb75dee83f145e2f1ec128 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 18 Jul 2007 14:22:40 -0700 Subject: Add dry-coded DRM drawable private information storage for FreeBSD. With this, all modules build again. --- linux-core/drmP.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 2bbc6200..3b2176c9 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1264,5 +1264,19 @@ static inline void drm_ctl_free(void *pt, size_t size, int area) /*@}*/ +/** Type for the OS's non-sleepable mutex lock */ +#define DRM_SPINTYPE spinlock_t +/** + * Initialize the lock for use. 
name is an optional string describing the + * lock + */ +#define DRM_SPININIT(l,name) spin_lock_init(l); +#define DRM_SPINUNINIT(l) +#define DRM_SPINLOCK(l) spin_lock(l); +#define DRM_SPINUNLOCK(u) spin_unlock(l); +#define DRM_SPINLOCK_IRQSAVE(l, flags) spin_lock_irqflags(l, _flags); +#define DRM_SPINUNLOCK_IRQRESTORE(u, flags) spin_unlock_irqrestore(l, _flags); +#define DRM_SPINLOCK_ASSERT(l) do {} while (0) + #endif /* __KERNEL__ */ #endif -- cgit v1.2.3 From 5ba94c2ab8be350fee495e5cfe94afb8f663956a Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 10:29:18 -0700 Subject: Initial pass at converting driver to DRM infrastructure. --- linux-core/Makefile | 3 +- linux-core/xgi_cmdlist.c | 99 ++- linux-core/xgi_cmdlist.h | 7 +- linux-core/xgi_drv.c | 1522 ++++++---------------------------------------- linux-core/xgi_drv.h | 205 +++---- linux-core/xgi_fb.c | 511 +++++++--------- linux-core/xgi_fb.h | 47 -- linux-core/xgi_linux.h | 490 --------------- linux-core/xgi_misc.c | 145 ++--- linux-core/xgi_misc.h | 2 - linux-core/xgi_pcie.c | 941 ++++++---------------------- linux-core/xgi_pcie.h | 68 --- linux-core/xgi_regs.h | 313 +++------- 13 files changed, 845 insertions(+), 3508 deletions(-) delete mode 100644 linux-core/xgi_fb.h delete mode 100644 linux-core/xgi_linux.h delete mode 100644 linux-core/xgi_pcie.h (limited to 'linux-core') diff --git a/linux-core/Makefile b/linux-core/Makefile index 2052459d..55e25253 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -91,8 +91,7 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS) NVHEADERS = nv_drv.h $(DRMHEADERS) FFBHEADERS = ffb_drv.h $(DRMHEADERS) NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS) -XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_fb.h xgi_linux.h xgi_misc.h \ - xgi_pcie.h xgi_regs.h xgi_types.h +XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS) PROGS = dristat drmstat diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c 
index 61373469..d2018057 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -26,7 +26,6 @@ * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" #include "xgi_misc.h" @@ -55,18 +54,19 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) s_cmdring._cmdRingSize = mem_alloc.size; s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; - s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr; + s_cmdring._cmdRingAllocOffset = mem_alloc.offset; s_cmdring._lastBatchStartAddr = 0; s_cmdring._cmdRingOffset = 0; return 1; } -void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) +static void xgi_submit_cmdlist(struct xgi_info * info, + struct xgi_cmd_info * pCmdInfo) { const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo); - XGI_INFO("After getCurBatchBeginPort()\n"); + DRM_INFO("After getCurBatchBeginPort()\n"); if (s_cmdring._lastBatchStartAddr == 0) { const unsigned int portOffset = BASE_3D_ENG + beginPort; @@ -75,50 +75,53 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) /* xgi_waitfor_pci_idle(info); */ // Enable PCI Trigger Mode - XGI_INFO("Enable PCI Trigger Mode \n"); + DRM_INFO("Enable PCI Trigger Mode \n"); /* Jong 06/14/2006; 0x400001a */ - dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | M2REG_CLEAR_COUNTERS_MASK | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); /* Jong 06/14/2006; 0x400000a */ - dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); // Send PCI begin command - XGI_INFO("Send PCI begin command \n"); + DRM_INFO("Send PCI begin command \n"); - XGI_INFO("portOffset=%d, 
beginPort=%d\n", + DRM_INFO("portOffset=%d, beginPort=%d\n", portOffset, beginPort); /* beginPort = 48; */ /* 0xc100000 */ - dwWriteReg(portOffset, + dwWriteReg(info->mmio_map, portOffset, (beginPort << 22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); - XGI_INFO("Send PCI begin command- After\n"); + DRM_INFO("Send PCI begin command- After\n"); /* 0x80000024 */ - dwWriteReg(portOffset + 4, + dwWriteReg(info->mmio_map, portOffset + 4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); /* 0x1010000 */ - dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4)); + dwWriteReg(info->mmio_map, portOffset + 8, + (pCmdInfo->_firstBeginAddr >> 4)); /* Jong 06/12/2006; system hang; marked for test */ - dwWriteReg(portOffset + 12, 0); + dwWriteReg(info->mmio_map, portOffset + 12, 0); /* Jong 06/13/2006; remove marked for system hang test */ /* xgi_waitfor_pci_idle(info); */ } else { u32 *lastBatchVirtAddr; - XGI_INFO("s_cmdring._lastBatchStartAddr != 0\n"); + DRM_INFO("s_cmdring._lastBatchStartAddr != 0\n"); if (pCmdInfo->_firstBeginType == BTYPE_3D) { addFlush2D(info); @@ -146,21 +149,38 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo) /* Jong 06/12/2006; system hang; marked for test */ triggerHWCommandList(info, pCmdInfo->_beginCount); } else { - XGI_ERROR("lastBatchVirtAddr is NULL\n"); + DRM_ERROR("lastBatchVirtAddr is NULL\n"); } } s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; - XGI_INFO("End\n"); + DRM_INFO("End\n"); } + +int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_cmd_info cmd_list; + struct xgi_info *info = dev->dev_private; + + DRM_COPY_FROM_USER_IOCTL(cmd_list, + (struct xgi_cmd_info __user *) data, + sizeof(cmd_list)); + + xgi_submit_cmdlist(info, &cmd_list); + return 0; +} + + /* state: 0 - console 1 - graphic 2 - fb 3 - logout */ -void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo) +int xgi_state_change(struct xgi_info * info, unsigned int to, + unsigned 
int from) { #define STATE_CONSOLE 0 #define STATE_GRAPHIC 1 @@ -169,26 +189,40 @@ void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo #define STATE_REBOOT 4 #define STATE_SHUTDOWN 5 - if ((pStateInfo->_fromState == STATE_GRAPHIC) - && (pStateInfo->_toState == STATE_CONSOLE)) { - XGI_INFO("[kd] I see, now is to leaveVT\n"); + if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) { + DRM_INFO("[kd] I see, now is to leaveVT\n"); // stop to received batch - } else if ((pStateInfo->_fromState == STATE_CONSOLE) - && (pStateInfo->_toState == STATE_GRAPHIC)) { - XGI_INFO("[kd] I see, now is to enterVT\n"); + } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) { + DRM_INFO("[kd] I see, now is to enterVT\n"); xgi_cmdlist_reset(); - } else if ((pStateInfo->_fromState == STATE_GRAPHIC) - && ((pStateInfo->_toState == STATE_LOGOUT) - || (pStateInfo->_toState == STATE_REBOOT) - || (pStateInfo->_toState == STATE_SHUTDOWN))) { - XGI_INFO("[kd] I see, not is to exit from X\n"); + } else if ((from == STATE_GRAPHIC) + && ((to == STATE_LOGOUT) + || (to == STATE_REBOOT) + || (to == STATE_SHUTDOWN))) { + DRM_INFO("[kd] I see, not is to exit from X\n"); // stop to received batch } else { - XGI_ERROR("[kd] Should not happen\n"); + DRM_ERROR("[kd] Should not happen\n"); + return DRM_ERR(EINVAL); } + return 0; } + +int xgi_state_change_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_state_info state; + struct xgi_info *info = dev->dev_private; + + DRM_COPY_FROM_USER_IOCTL(state, (struct xgi_state_info __user *) data, + sizeof(state)); + + return xgi_state_change(info, state._toState, state._fromState); +} + + void xgi_cmdlist_reset(void) { s_cmdring._lastBatchStartAddr = 0; @@ -198,7 +232,7 @@ void xgi_cmdlist_reset(void) void xgi_cmdlist_cleanup(struct xgi_info * info) { if (s_cmdring._cmdRingBuffer != 0) { - xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); + xgi_pcie_free(info, s_cmdring._cmdRingAllocOffset, NULL); s_cmdring._cmdRingBuffer = 0; 
s_cmdring._cmdRingOffset = 0; s_cmdring._cmdRingSize = 0; @@ -212,7 +246,8 @@ static void triggerHWCommandList(struct xgi_info * info, //Fix me, currently we just trigger one time while (triggerCounter--) { - dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, 0x05000000 + (0x0ffff & s_triggerID++)); // xgi_waitfor_pci_idle(info); } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index d2b95c0e..4bc56ec1 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -60,16 +60,15 @@ typedef enum { struct xgi_cmdring_info { unsigned int _cmdRingSize; u32 _cmdRingBuffer; - unsigned long _cmdRingBusAddr; + unsigned long _cmdRingAllocOffset; u32 _lastBatchStartAddr; u32 _cmdRingOffset; }; extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); -extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo); - -extern void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo); +extern int xgi_state_change(struct xgi_info * info, unsigned int to, + unsigned int from); extern void xgi_cmdlist_cleanup(struct xgi_info * info); diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index bd39dfdc..3b9f4cb1 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -25,96 +25,119 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ -#include "xgi_linux.h" + +#include "drmP.h" +#include "drm.h" #include "xgi_drv.h" #include "xgi_regs.h" -#include "xgi_pcie.h" #include "xgi_misc.h" #include "xgi_cmdlist.h" -/* for debug */ -static int xgi_temp = 1; -/* - * global parameters - */ -static struct xgi_dev { - u16 vendor; - u16 device; - const char *name; -} xgidev_list[] = { - { - PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, { - PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, { - 0, 0, NULL} -}; - -int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */ +#include "drm_pciids.h" -static int xgi_num_devices = 0; +static struct pci_device_id pciidlist[] = { + xgi_PCI_IDS +}; -struct xgi_info xgi_devices[XGI_MAX_DEVICES]; +static int xgi_bootstrap(DRM_IOCTL_ARGS); -#if defined(XGI_PM_SUPPORT_APM) -static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; -#endif +static drm_ioctl_desc_t xgi_ioctls[] = { + [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH}, -/* add one for the control device */ -struct xgi_info xgi_ctl_device; -wait_queue_head_t xgi_ctl_waitqueue; + [DRM_IOCTL_NR(DRM_XGI_FB_ALLOC)] = {xgi_fb_alloc_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_FB_FREE)] = {xgi_fb_free_ioctl, DRM_AUTH}, -#ifdef CONFIG_PROC_FS -struct proc_dir_entry *proc_xgi; -#endif + [DRM_IOCTL_NR(DRM_XGI_PCIE_ALLOC)] = {xgi_pcie_alloc_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_PCIE_FREE)] = {xgi_pcie_free_ioctl, DRM_AUTH}, -#ifdef CONFIG_DEVFS_FS -devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; -#endif + [DRM_IOCTL_NR(DRM_XGI_GE_RESET)] = {xgi_ge_reset_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_DUMP_REGISTER)] = {xgi_dump_register_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_DEBUG_INFO)] = {xgi_restore_registers_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_SUBMIT_CMDLIST)] = {xgi_submit_cmdlist_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_TEST_RWINKERNEL)] = {xgi_test_rwinkernel_ioctl, DRM_AUTH}, + 
[DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH}, +}; -struct list_head xgi_mempid_list; +static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static int xgi_driver_load(struct drm_device *dev, unsigned long flags); +static int xgi_driver_unload(struct drm_device *dev); +static void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp); +static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); + + +static struct drm_driver driver = { + .driver_features = + DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | + DRIVER_IRQ_SHARED | DRIVER_SG, + .dev_priv_size = sizeof(struct xgi_info), + .load = xgi_driver_load, + .unload = xgi_driver_unload, + .preclose = xgi_driver_preclose, + .dma_quiescent = NULL, + .irq_preinstall = NULL, + .irq_postinstall = NULL, + .irq_uninstall = NULL, + .irq_handler = xgi_kern_isr, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = xgi_ioctls, + .dma_ioctl = NULL, + + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, + }, + + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, -/* xgi_ functions.. 
do not take a state device parameter */ -static int xgi_post_vbios(struct xgi_ioctl_post_vbios * info); -static void xgi_proc_create(void); -static void xgi_proc_remove_all(struct proc_dir_entry *); -static void xgi_proc_remove(void); +}; -/* xgi_kern_ functions, interfaces used by linux kernel */ -int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *); +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} -unsigned int xgi_kern_poll(struct file *, poll_table *); -int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long); -int xgi_kern_mmap(struct file *, struct vm_area_struct *); -int xgi_kern_open(struct inode *, struct file *); -int xgi_kern_release(struct inode *inode, struct file *filp); -void xgi_kern_vma_open(struct vm_area_struct *vma); -void xgi_kern_vma_release(struct vm_area_struct *vma); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int *type); -#else -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int write_access); -#endif +static int __init xgi_init(void) +{ + driver.num_ioctls = xgi_max_ioctl; + return drm_init(&driver, pciidlist); +} -int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *); -int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *); +static void __exit xgi_exit(void) +{ + drm_exit(&driver); +} -int xgi_kern_ctl_open(struct inode *, struct file *); -int xgi_kern_ctl_close(struct inode *, struct file *); -unsigned int xgi_kern_ctl_poll(struct file *, poll_table *); +module_init(xgi_init); +module_exit(xgi_exit); -void xgi_kern_isr_bh(unsigned long); -irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); 
+MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); -static void xgi_lock_init(struct xgi_info * info); -#if defined(XGI_PM_SUPPORT_ACPI) -int xgi_kern_acpi_standby(struct pci_dev *, u32); -int xgi_kern_acpi_resume(struct pci_dev *); -#endif +void xgi_kern_isr_bh(struct drm_device *dev); /* * verify access to pci config space wasn't disabled behind our back @@ -129,1361 +152,206 @@ int xgi_kern_acpi_resume(struct pci_dev *); static inline void xgi_check_pci_config(struct xgi_info * info, int line) { - unsigned short cmd, flag = 0; - - // don't do this on the control device, only the actual devices - if (info->flags & XGI_FLAG_CONTROL) - return; + u16 cmd; + bool flag = 0; - pci_read_config_word(info->dev, PCI_COMMAND, &cmd); + pci_read_config_word(info->dev->pdev, PCI_COMMAND, &cmd); if (!(cmd & PCI_COMMAND_MASTER)) { - XGI_INFO("restoring bus mastering! (%d)\n", line); + DRM_INFO("restoring bus mastering! (%d)\n", line); cmd |= PCI_COMMAND_MASTER; flag = 1; } if (!(cmd & PCI_COMMAND_MEMORY)) { - XGI_INFO("restoring MEM access! (%d)\n", line); + DRM_INFO("restoring MEM access! 
(%d)\n", line); cmd |= PCI_COMMAND_MEMORY; flag = 1; } if (flag) - pci_write_config_word(info->dev, PCI_COMMAND, cmd); + pci_write_config_word(info->dev->pdev, PCI_COMMAND, cmd); } -/* - * struct pci_device_id { - * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID - * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID - * unsigned int class, class_mask; // (class,subclass,prog-if) triplet - * unsigned long driver_data; // Data private to the driver - * }; - */ -static struct pci_device_id xgi_dev_table[] = { - { - .vendor = PCI_VENDOR_ID_XGI, - .device = PCI_ANY_ID, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .class = (PCI_CLASS_DISPLAY_VGA << 8), - .class_mask = ~0, - }, - {} -}; - -/* - * #define MODULE_DEVICE_TABLE(type,name) \ - * MODULE_GENERIC_TABLE(type##_device,name) - */ -MODULE_DEVICE_TABLE(pci, xgi_dev_table); - -/* - * struct pci_driver { - * struct list_head node; - * char *name; - * const struct pci_device_id *id_table; // NULL if wants all devices - * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted - * void (*remove)(struct pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver) - * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context - * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended - * int (*resume)(struct pci_dev *dev); // Device woken up - * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event - * }; - */ -static struct pci_driver xgi_pci_driver = { - .name = "xgi", - .id_table = xgi_dev_table, - .probe = xgi_kern_probe, -#if defined(XGI_SUPPORT_ACPI) - .suspend = xgi_kern_acpi_standby, - .resume = xgi_kern_acpi_resume, -#endif -}; - -/* - * find xgi devices and set initial state - */ -int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) +int xgi_bootstrap(DRM_IOCTL_ARGS) { - struct xgi_info *info; - - if ((dev->vendor != PCI_VENDOR_ID_XGI) - || 
(dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) { - return -1; - } - - if (xgi_num_devices == XGI_MAX_DEVICES) { - XGI_INFO("maximum device number (%d) reached!\n", - xgi_num_devices); - return -1; - } - - /* enable io, mem, and bus-mastering in pci config space */ - if (pci_enable_device(dev) != 0) { - XGI_INFO("pci_enable_device failed, aborting\n"); - return -1; - } - - XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices); - - pci_set_master(dev); - - info = &xgi_devices[xgi_num_devices]; - info->dev = dev; + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + struct xgi_bootstrap bs; + int err; - xgi_lock_init(info); - info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1); - info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1); + DRM_COPY_FROM_USER_IOCTL(bs, (struct xgi_bootstrap __user *) data, + sizeof(bs)); - /* check IO region */ - if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) { - XGI_ERROR("cannot reserve MMIO memory\n"); - goto error_disable_dev; + if (info->bootstrap_done) { + return 0; } - XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); - XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size); - - info->mmio.vbase = ioremap_nocache(info->mmio.base, info->mmio.size); - if (!info->mmio.vbase) { - release_mem_region(info->mmio.base, info->mmio.size); - XGI_ERROR("info->mmio.vbase failed\n"); - goto error_disable_dev; - } xgi_enable_mmio(info); - //xgi_enable_ge(info); - - XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase); - - info->fb.base = XGI_PCI_RESOURCE_START(dev, 0); - info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0); - - XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base); - XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); - - info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024; - XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); - - /* check frame buffer region - if (!request_mem_region(info->fb.base, info->fb.size, "xgi")) - { - release_mem_region(info->mmio.base, info->mmio.size); - XGI_ERROR("cannot reserve 
frame buffer memory\n"); - goto error_disable_dev; - } - - info->fb.vbase = ioremap_nocache(info->fb.base, info->fb.size); - - if (!info->fb.vbase) - { - release_mem_region(info->mmio.base, info->mmio.size); - release_mem_region(info->fb.base, info->fb.size); - XGI_ERROR("info->fb.vbase failed\n"); - goto error_disable_dev; - } - */ - info->fb.vbase = NULL; - XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); - - - /* check common error condition */ - if (info->dev->irq == 0) { - XGI_ERROR("Can't find an IRQ for your XGI card! \n"); - goto error_zero_dev; - } - XGI_INFO("info->irq: %lx \n", info->dev->irq); - - //xgi_enable_dvi_interrupt(info); - - /* sanity check the IO apertures */ - if ((info->mmio.base == 0) || (info->mmio.size == 0) - || (info->fb.base == 0) || (info->fb.size == 0)) { - XGI_ERROR("The IO regions for your XGI card are invalid.\n"); - - if ((info->mmio.base == 0) || (info->mmio.size == 0)) { - XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n", - info->mmio.base, info->mmio.size); - } - - if ((info->fb.base == 0) || (info->fb.size == 0)) { - XGI_ERROR - ("frame buffer appears to be wrong: 0x%lx 0x%lx\n", - info->fb.base, info->fb.size); - } - - goto error_zero_dev; - } - //xgi_num_devices++; - - return 0; - - error_zero_dev: - release_mem_region(info->fb.base, info->fb.size); - release_mem_region(info->mmio.base, info->mmio.size); - - error_disable_dev: - pci_disable_device(dev); - return -1; + info->pcie.size = bs.gart_size * (1024 * 1024); -} - -/* - * vma operations... - * this is only called when the vmas are duplicated. this - * appears to only happen when the process is cloned to create - * a new process, and not when the process is threaded. - * - * increment the usage count for the physical pages, so when - * this clone unmaps the mappings, the pages are not - * deallocated under the original process. 
- */ -struct vm_operations_struct xgi_vm_ops = { - .open = xgi_kern_vma_open, - .close = xgi_kern_vma_release, - .nopage = xgi_kern_vma_nopage, -}; - -void xgi_kern_vma_open(struct vm_area_struct *vma) -{ - XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n", - vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); - - if (XGI_VMA_PRIVATE(vma)) { - struct xgi_pcie_block *block = - (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); - XGI_ATOMIC_INC(block->use_count); - } -} - -void xgi_kern_vma_release(struct vm_area_struct *vma) -{ - XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n", - vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); - - if (XGI_VMA_PRIVATE(vma)) { - struct xgi_pcie_block *block = - (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); - XGI_ATOMIC_DEC(block->use_count); - - /* - * if use_count is down to 0, the kernel virtual mapping was freed - * but the underlying physical pages were not, we need to clear the - * bit and free the physical pages. - */ - if (XGI_ATOMIC_READ(block->use_count) == 0) { - // Need TO Finish - XGI_VMA_PRIVATE(vma) = NULL; - } - } -} - -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int *type) -{ - struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); - struct page *page = NOPAGE_SIGBUS; - unsigned long offset = 0; - unsigned long page_addr = 0; -/* - XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma), - address); -*/ - offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); - - offset = offset - block->bus_addr; - - offset >>= PAGE_SHIFT; - - page_addr = block->page_table[offset].virt_addr; - - if (xgi_temp) { - XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" - "block->page_count: 0x%lx block->page_order: 0x%lx" - "block->page_table[0x%lx].virt_addr: 0x%lx\n", - block->bus_addr, block->hw_addr, - block->page_count, 
block->page_order, - offset, block->page_table[offset].virt_addr); - xgi_temp = 0; - } - - if (!page_addr) - goto out; /* hole or end-of-file */ - page = virt_to_page(page_addr); - - /* got it, now increment the count */ - get_page(page); - out: - return page; - -} -#else -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, - unsigned long address, int write_access) -{ - struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma); - struct page *page = NOPAGE_SIGBUS; - unsigned long offset = 0; - unsigned long page_addr = 0; -/* - XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", - vma->vm_start, - vma->vm_end, - XGI_VMA_OFFSET(vma), - address); -*/ - offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); - - offset = offset - block->bus_addr; - - offset >>= PAGE_SHIFT; - - page_addr = block->page_table[offset].virt_addr; - - if (xgi_temp) { - XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" - "block->page_count: 0x%lx block->page_order: 0x%lx" - "block->page_table[0x%lx].virt_addr: 0x%lx\n", - block->bus_addr, block->hw_addr, - block->page_count, block->page_order, - offset, block->page_table[offset].virt_addr); - xgi_temp = 0; - } - - if (!page_addr) - goto out; /* hole or end-of-file */ - page = virt_to_page(page_addr); - - /* got it, now increment the count */ - get_page(page); - out: - return page; -} -#endif - -#if 0 -static struct file_operations xgi_fops = { - /* owner: THIS_MODULE, */ - poll:xgi_kern_poll, - ioctl:xgi_kern_ioctl, - mmap:xgi_kern_mmap, - open:xgi_kern_open, - release:xgi_kern_release, -}; -#endif - -static struct file_operations xgi_fops = { - .owner = THIS_MODULE, - .poll = xgi_kern_poll, - .ioctl = xgi_kern_ioctl, - .mmap = xgi_kern_mmap, - .open = xgi_kern_open, - .release = xgi_kern_release, -}; - -static struct xgi_file_private *xgi_alloc_file_private(void) -{ - struct xgi_file_private *fp; - - XGI_KMALLOC(fp, sizeof(struct xgi_file_private)); - if (!fp) - return NULL; - - memset(fp, 
0, sizeof(struct xgi_file_private)); - - /* initialize this file's event queue */ - init_waitqueue_head(&fp->wait_queue); - - xgi_init_lock(fp->fp_lock); - - return fp; -} - -static void xgi_free_file_private(struct xgi_file_private * fp) -{ - if (fp == NULL) - return; - - XGI_KFREE(fp, sizeof(struct xgi_file_private)); -} - -int xgi_kern_open(struct inode *inode, struct file *filp) -{ - struct xgi_info *info = NULL; - int dev_num; - int result = 0, status; - - /* - * the type and num values are only valid if we are not using devfs. - * However, since we use them to retrieve the device pointer, we - * don't need them with devfs as filp->private_data is already - * initialized - */ - filp->private_data = xgi_alloc_file_private(); - if (filp->private_data == NULL) - return -ENOMEM; - - XGI_INFO("filp->private_data %p\n", filp->private_data); - /* - * for control device, just jump to its open routine - * after setting up the private data - */ - if (XGI_IS_CONTROL_DEVICE(inode)) - return xgi_kern_ctl_open(inode, filp); - - /* what device are we talking about? 
*/ - dev_num = XGI_DEVICE_NUMBER(inode); - if (dev_num >= XGI_MAX_DEVICES) { - xgi_free_file_private(filp->private_data); - filp->private_data = NULL; - return -ENODEV; - } - - info = &xgi_devices[dev_num]; - - XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num); - - xgi_down(info->info_sem); - XGI_CHECK_PCI_CONFIG(info); - - XGI_INFO_FROM_FP(filp) = info; - - /* - * map the memory and allocate isr on first open - */ - - if (!(info->flags & XGI_FLAG_OPEN)) { - XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); - - if (info->dev->device == 0) { - XGI_INFO("open of nonexistent device %d\n", dev_num); - result = -ENXIO; - goto failed; - } - - /* initialize struct irqaction */ - status = request_irq(info->dev->irq, xgi_kern_isr, - SA_INTERRUPT | SA_SHIRQ, "xgi", - (void *)info); - if (status != 0) { - if (info->dev->irq && (status == -EBUSY)) { - XGI_ERROR - ("Tried to get irq %d, but another driver", - (unsigned int)info->dev->irq); - XGI_ERROR("has it and is not sharing it.\n"); - } - XGI_ERROR("isr request failed 0x%x\n", status); - result = -EIO; - goto failed; - } - - /* - * #define DECLARE_TASKLET(name, func, data) \ - * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } - */ - info->tasklet.func = xgi_kern_isr_bh; - info->tasklet.data = (unsigned long)info; - tasklet_enable(&info->tasklet); - - /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - xgi_cmdlist_initialize(info, 0x100000); - - info->flags |= XGI_FLAG_OPEN; - } - - XGI_ATOMIC_INC(info->use_count); - - failed: - xgi_up(info->info_sem); - - if ((result) && filp->private_data) { - xgi_free_file_private(filp->private_data); - filp->private_data = NULL; - } - - return result; -} - -int xgi_kern_release(struct inode *inode, struct file *filp) -{ - struct xgi_info *info = XGI_INFO_FROM_FP(filp); - - XGI_CHECK_PCI_CONFIG(info); - - /* - * for control device, just jump to its open routine - * after setting up the private data - */ - if (XGI_IS_CONTROL_DEVICE(inode)) - return 
xgi_kern_ctl_close(inode, filp); - - XGI_INFO("Jong-xgi_kern_release on device %d\n", - XGI_DEVICE_NUMBER(inode)); - - xgi_down(info->info_sem); - if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { - - /* - * The usage count for this device has dropped to zero, it can be shut - * down safely; disable its interrupts. - */ - - /* - * Disable this device's tasklet to make sure that no bottom half will - * run with undefined device state. - */ - tasklet_disable(&info->tasklet); - - /* - * Free the IRQ, which may block until all pending interrupt processing - * has completed. - */ - free_irq(info->dev->irq, (void *)info); - - xgi_cmdlist_cleanup(info); - - /* leave INIT flag alone so we don't reinit every time */ - info->flags &= ~XGI_FLAG_OPEN; - } - - xgi_up(info->info_sem); - - if (FILE_PRIVATE(filp)) { - xgi_free_file_private(FILE_PRIVATE(filp)); - FILE_PRIVATE(filp) = NULL; - } - - return 0; -} - -int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) -{ - //struct inode *inode = INODE_FROM_FP(filp); - struct xgi_info *info = XGI_INFO_FROM_FP(filp); - struct xgi_pcie_block *block; - int pages = 0; - unsigned long prot; - - XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n", - vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); - - XGI_CHECK_PCI_CONFIG(info); - - if (XGI_MASK_OFFSET(vma->vm_start) - || XGI_MASK_OFFSET(vma->vm_end)) { - XGI_ERROR("VM: bad mmap range: %lx - %lx\n", - vma->vm_start, vma->vm_end); - return -ENXIO; - } - - pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; - - vma->vm_ops = &xgi_vm_ops; - - /* XGI IO(reg) space */ - if (IS_IO_OFFSET - (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - /* mark it as IO so that we don't dump it on core dump */ - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap io space \n"); - } - /* XGI fb space 
*/ - /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */ - else if (IS_FB_OFFSET - (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - // mark it as IO so that we don't dump it on core dump - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap fb space \n"); - } - /* PCIE allocator */ - /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */ - else if (IS_PCIE_OFFSET - (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { - xgi_down(info->pcie_sem); - - block = xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma)); - - if (block == NULL) { - XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); - xgi_up(info->pcie_sem); - return -EAGAIN; - } - - if (block->page_count != pages) { - XGI_ERROR - ("pre-allocated PCIE memory has wrong number of pages!\n"); - xgi_up(info->pcie_sem); - return -EAGAIN; - } - - vma->vm_private_data = block; - XGI_ATOMIC_INC(block->use_count); - xgi_up(info->pcie_sem); - - /* - * prevent the swapper from swapping it out - * mark the memory i/o so the buffers aren't - * dumped on core dumps */ - vma->vm_flags |= (VM_LOCKED | VM_IO); - - /* un-cached */ - prot = pgprot_val(vma->vm_page_prot); - /* - if (boot_cpu_data.x86 > 3) - prot |= _PAGE_PCD | _PAGE_PWT; - */ - vma->vm_page_prot = __pgprot(prot); - - XGI_INFO("VM: mmap pcie space \n"); - } -#if 0 - else if (IS_FB_OFFSET - (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (XGI_REMAP_PAGE_RANGE(vma->vm_start, - XGI_VMA_OFFSET(vma), - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - // mark it as IO so that we don't dump it on core dump - vma->vm_flags |= VM_IO; - XGI_INFO("VM: mmap fb space \n"); - } -#endif - else { - vma->vm_flags |= (VM_IO | VM_LOCKED); - 
XGI_ERROR("VM: mmap wrong range \n"); - } - - vma->vm_file = filp; - - return 0; -} - -unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) -{ - struct xgi_file_private *fp; - struct xgi_info *info; - unsigned int mask = 0; - unsigned long eflags; - - info = XGI_INFO_FROM_FP(filp); - - if (info->device_number == XGI_CONTROL_DEVICE_NUMBER) - return xgi_kern_ctl_poll(filp, wait); - - fp = XGI_GET_FP(filp); - - if (!(filp->f_flags & O_NONBLOCK)) { - /* add us to the list */ - poll_wait(filp, &fp->wait_queue, wait); - } - - xgi_lock_irqsave(fp->fp_lock, eflags); - - /* wake the user on any event */ - if (fp->num_events) { - XGI_INFO("Hey, an event occured!\n"); - /* - * trigger the client, when they grab the event, - * we'll decrement the event count - */ - mask |= (POLLPRI | POLLIN); - } - xgi_unlock_irqsave(fp->fp_lock, eflags); - - return mask; -} - -int xgi_kern_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - struct xgi_info *info; - struct xgi_mem_alloc *alloc = NULL; - - int status = 0; - void *arg_copy; - int arg_size; - int err = 0; - - info = XGI_INFO_FROM_FP(filp); - - XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), - _IOC_NR(cmd), arg, _IOC_SIZE(cmd)); - /* - * extract the type and number bitfields, and don't decode - * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() - */ - if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) - return -ENOTTY; - if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) - return -ENOTTY; - - /* - * the direction is a bitmask, and VERIFY_WRITE catches R/W - * transfers. 
`Type' is user-oriented, while - * access_ok is kernel-oriented, so the concept of "read" and - * "write" is reversed - */ - if (_IOC_DIR(cmd) & _IOC_READ) { - err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); - } else if (_IOC_DIR(cmd) & _IOC_WRITE) { - err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); - } - if (err) - return -EFAULT; - - XGI_CHECK_PCI_CONFIG(info); - - arg_size = _IOC_SIZE(cmd); - XGI_KMALLOC(arg_copy, arg_size); - if (arg_copy == NULL) { - XGI_ERROR("failed to allocate ioctl memory\n"); - return -ENOMEM; - } - - /* Jong 05/25/2006 */ - /* copy_from_user(arg_copy, (void *)arg, arg_size); */ - if (copy_from_user(arg_copy, (void *)arg, arg_size)) { - XGI_ERROR("failed to copyin ioctl data\n"); - XGI_INFO("Jong-copy_from_user-fail! \n"); - } else - XGI_INFO("Jong-copy_from_user-OK! \n"); - - alloc = (struct xgi_mem_alloc *) arg_copy; - XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, - arg_size); - - switch (_IOC_NR(cmd)) { - case XGI_ESC_POST_VBIOS: - XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); - break; - case XGI_ESC_FB_ALLOC: - XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); - xgi_fb_alloc(info, alloc, 0); - break; - case XGI_ESC_FB_FREE: - XGI_INFO("Jong-xgi_ioctl_fb_free \n"); - xgi_fb_free(info, *(unsigned long *)arg_copy); - break; - case XGI_ESC_PCIE_ALLOC: - XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); - xgi_pcie_alloc(info, alloc, 0); - break; - case XGI_ESC_PCIE_FREE: - XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", - *((unsigned long *)arg_copy)); - xgi_pcie_free(info, *((unsigned long *)arg_copy)); - break; - case XGI_ESC_GE_RESET: - XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); - xgi_ge_reset(info); - break; - case XGI_ESC_DUMP_REGISTER: - XGI_INFO("Jong-xgi_ioctl_dump_register \n"); - xgi_dump_register(info); - break; - case XGI_ESC_DEBUG_INFO: - XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); - xgi_restore_registers(info); - break; - case XGI_ESC_SUBMIT_CMDLIST: - 
XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); - xgi_submit_cmdlist(info, (struct xgi_cmd_info *) arg_copy); - break; - case XGI_ESC_TEST_RWINKERNEL: - XGI_INFO("Jong-xgi_test_rwinkernel \n"); - xgi_test_rwinkernel(info, *(unsigned long *)arg_copy); - break; - case XGI_ESC_STATE_CHANGE: - XGI_INFO("Jong-xgi_state_change \n"); - xgi_state_change(info, (struct xgi_state_info *) arg_copy); - break; - default: - XGI_INFO("Jong-xgi_ioctl_default \n"); - status = -EINVAL; - break; - } - - if (copy_to_user((void *)arg, arg_copy, arg_size)) { - XGI_ERROR("failed to copyout ioctl data\n"); - XGI_INFO("Jong-copy_to_user-fail! \n"); - } else - XGI_INFO("Jong-copy_to_user-OK! \n"); - - XGI_KFREE(arg_copy, arg_size); - return status; -} - -/* - * xgi control driver operations defined here - */ -int xgi_kern_ctl_open(struct inode *inode, struct file *filp) -{ - struct xgi_info *info = &xgi_ctl_device; - - int rc = 0; - - XGI_INFO("Jong-xgi_kern_ctl_open\n"); - - xgi_down(info->info_sem); - info->device_number = XGI_CONTROL_DEVICE_NUMBER; - - /* save the xgi info in file->private_data */ - filp->private_data = info; - - if (XGI_ATOMIC_READ(info->use_count) == 0) { - init_waitqueue_head(&xgi_ctl_waitqueue); - } - - info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL; - - XGI_ATOMIC_INC(info->use_count); - xgi_up(info->info_sem); - - return rc; -} - -int xgi_kern_ctl_close(struct inode *inode, struct file *filp) -{ - struct xgi_info *info = XGI_INFO_FROM_FP(filp); - - XGI_INFO("Jong-xgi_kern_ctl_close\n"); - - xgi_down(info->info_sem); - if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { - info->flags = 0; - } - xgi_up(info->info_sem); - - if (FILE_PRIVATE(filp)) { - xgi_free_file_private(FILE_PRIVATE(filp)); - FILE_PRIVATE(filp) = NULL; - } - - return 0; -} - -unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait) -{ - //struct xgi_info *info = XGI_INFO_FROM_FP(filp);; - unsigned int ret = 0; - - if (!(filp->f_flags & O_NONBLOCK)) { - poll_wait(filp, &xgi_ctl_waitqueue, 
wait); - } - - return ret; -} - -/* - * xgi proc system - */ -static u8 xgi_find_pcie_capability(struct pci_dev *dev) -{ - u16 status; - u8 cap_ptr, cap_id; - - pci_read_config_word(dev, PCI_STATUS, &status); - status &= PCI_STATUS_CAP_LIST; - if (!status) - return 0; - - switch (dev->hdr_type) { - case PCI_HEADER_TYPE_NORMAL: - case PCI_HEADER_TYPE_BRIDGE: - pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr); - break; - default: - return 0; + /* Init the resource manager */ + err = xgi_pcie_heap_init(info); + if (err) { + DRM_ERROR("xgi_pcie_heap_init() failed\n"); + return err; } - do { - cap_ptr &= 0xFC; - pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); - pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, - &cap_ptr); - } while (cap_ptr && cap_id != 0xFF); - - return 0; -} - -int xgi_kern_read_card_info(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - struct pci_dev *dev; - char *type; - int len = 0; - - struct xgi_info *info; - info = (struct xgi_info *) data; - - dev = info->dev; - if (!dev) - return 0; - - type = xgi_find_pcie_capability(dev) ? 
"PCIE" : "PCI"; - len += sprintf(page + len, "Card Type: \t %s\n", type); - - XGI_PCI_DEV_PUT(dev); - return len; -} - -int xgi_kern_read_version(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - int len = 0; - - len += sprintf(page + len, "XGI version: %s\n", "1.0"); - len += sprintf(page + len, "GCC version: %s\n", "3.0"); + /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ + xgi_cmdlist_initialize(info, 0x100000); - return len; -} - -int xgi_kern_read_pcie_info(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ + info->bootstrap_done = 1; return 0; } -int xgi_kern_read_status(char *page, char **start, off_t off, - int count, int *eof, void *data) -{ - return 0; -} -static void xgi_proc_create(void) +void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp) { -#ifdef CONFIG_PROC_FS - - struct pci_dev *dev; - int i = 0; - char name[6]; - - struct proc_dir_entry *entry; - struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; - - struct xgi_info *info; - struct xgi_info *xgi_max_devices; - - /* world readable directory */ - int flags = S_IFDIR | S_IRUGO | S_IXUGO; - - proc_xgi = create_proc_entry("xgi", flags, proc_root_driver); - if (!proc_xgi) - goto failed; - - proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi); - if (!proc_xgi_cards) - goto failed; - - proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi); - if (!proc_xgi_pcie) - goto failed; - - /* - * Set the module owner to ensure that the reference - * count reflects accesses to the proc files. 
- */ - proc_xgi->owner = THIS_MODULE; - proc_xgi_cards->owner = THIS_MODULE; - proc_xgi_pcie->owner = THIS_MODULE; - - xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; - for (info = xgi_devices; info < xgi_max_devices; info++) { - /* world readable file */ - flags = S_IFREG | S_IRUGO; - - dev = info->dev; - if (!dev) - break; - - sprintf(name, "%d", i++); - entry = create_proc_entry(name, flags, proc_xgi_cards); - if (!entry) { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_card_info; - entry->owner = THIS_MODULE; - - if (xgi_find_pcie_capability(dev)) { - entry = - create_proc_entry("status", flags, proc_xgi_pcie); - if (!entry) { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_status; - entry->owner = THIS_MODULE; - - entry = create_proc_entry("card", flags, proc_xgi_pcie); - if (!entry) { - XGI_PCI_DEV_PUT(dev); - goto failed; - } - - entry->data = info; - entry->read_proc = xgi_kern_read_pcie_info; - entry->owner = THIS_MODULE; - } - - XGI_PCI_DEV_PUT(dev); - } - - entry = create_proc_entry("version", flags, proc_xgi); - if (!entry) - goto failed; - - entry->read_proc = xgi_kern_read_version; - entry->owner = THIS_MODULE; - - entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie); - if (!entry) - goto failed; + struct xgi_info * info = dev->dev_private; - entry->data = NULL; - entry->read_proc = xgi_kern_read_pcie_info; - entry->owner = THIS_MODULE; - - return; - - failed: - XGI_ERROR("failed to create /proc entries!\n"); - xgi_proc_remove_all(proc_xgi); -#endif + xgi_pcie_free_all(info, filp); + xgi_fb_free_all(info, filp); } -#ifdef CONFIG_PROC_FS -static void xgi_proc_remove_all(struct proc_dir_entry *entry) -{ - while (entry) { - struct proc_dir_entry *next = entry->next; - if (entry->subdir) - xgi_proc_remove_all(entry->subdir); - remove_proc_entry(entry->name, entry->parent); - if (entry == proc_xgi) - break; - entry = next; - } -} -#endif 
- -static void xgi_proc_remove(void) -{ -#ifdef CONFIG_PROC_FS - xgi_proc_remove_all(proc_xgi); -#endif -} /* * driver receives an interrupt if someone waiting, then hand it off. */ -irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) +irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) { - struct xgi_info *info = (struct xgi_info *) dev_id; + struct drm_device *dev = (struct drm_device *) arg; +// struct xgi_info *info = dev->dev_private; u32 need_to_run_bottom_half = 0; - //XGI_INFO("xgi_kern_isr \n"); + //DRM_INFO("xgi_kern_isr \n"); //XGI_CHECK_PCI_CONFIG(info); //xgi_dvi_irq_handler(info); if (need_to_run_bottom_half) { - tasklet_schedule(&info->tasklet); + drm_locked_tasklet(dev, xgi_kern_isr_bh); } return IRQ_HANDLED; } -void xgi_kern_isr_bh(unsigned long data) +void xgi_kern_isr_bh(struct drm_device *dev) { - struct xgi_info *info = (struct xgi_info *) data; + struct xgi_info *info = dev->dev_private; - XGI_INFO("xgi_kern_isr_bh \n"); + DRM_INFO("xgi_kern_isr_bh \n"); //xgi_dvi_irq_handler(info); XGI_CHECK_PCI_CONFIG(info); } -static void xgi_lock_init(struct xgi_info * info) +int xgi_driver_load(struct drm_device *dev, unsigned long flags) { - if (info == NULL) - return; - - spin_lock_init(&info->info_lock); - - sema_init(&info->info_sem, 1); - sema_init(&info->fb_sem, 1); - sema_init(&info->pcie_sem, 1); - - XGI_ATOMIC_SET(info->use_count, 0); -} + struct xgi_info *info; + int err; -static void xgi_dev_init(struct xgi_info * info) -{ - struct pci_dev *pdev = NULL; - struct xgi_dev *dev; - int found = 0; - u16 pci_cmd; - XGI_INFO("Enter xgi_dev_init \n"); + info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); + if (!info) + return DRM_ERR(ENOMEM); - //XGI_PCI_FOR_EACH_DEV(pdev) - { - for (dev = xgidev_list; dev->vendor; dev++) { - if ((dev->vendor == pdev->vendor) - && (dev->device == pdev->device)) { - u8 rev_id; + (void) memset(info, 0, sizeof(*info)); + dev->dev_private = info; + info->dev = dev; - XGI_INFO("dev->vendor = pdev->vendor= %x \n", - 
dev->vendor); - XGI_INFO("dev->device = pdev->device= %x \n", - dev->device); + sema_init(&info->fb_sem, 1); + sema_init(&info->pcie_sem, 1); - xgi_devices[found].dev = pdev; + info->mmio.base = drm_get_resource_start(dev, 1); + info->mmio.size = drm_get_resource_len(dev, 1); - pci_read_config_byte(pdev, PCI_REVISION_ID, - rev_id); + DRM_INFO("mmio base: 0x%lx, size: 0x%x\n", + (unsigned long) info->mmio.base, info->mmio.size); - XGI_INFO("PCI_REVISION_ID= %x \n", rev_id); - pci_read_config_word(pdev, PCI_COMMAND, - &pci_cmd); + if ((info->mmio.base == 0) || (info->mmio.size == 0)) { + DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n", + (unsigned long) info->mmio.base, info->mmio.size); + return DRM_ERR(EINVAL); + } - XGI_INFO("PCI_COMMAND = %x \n", pci_cmd); - break; - } - } + err = drm_addmap(dev, info->mmio.base, info->mmio.size, + _DRM_REGISTERS, _DRM_KERNEL | _DRM_READ_ONLY, + &info->mmio_map); + if (err) { + DRM_ERROR("Unable to map MMIO region: %d\n", err); + return err; } -} -/* - * Export to Linux Kernel - */ + xgi_enable_mmio(info); + //xgi_enable_ge(info); -static int __init xgi_init_module(void) -{ - struct xgi_info *info = &xgi_devices[xgi_num_devices]; - int i, result; + info->fb.base = drm_get_resource_start(dev, 0); + info->fb.size = drm_get_resource_len(dev, 0); - XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION); - //SET_MODULE_OWNER(&xgi_fops); + DRM_INFO("fb base: 0x%lx, size: 0x%x\n", + (unsigned long) info->fb.base, info->fb.size); - memset(xgi_devices, 0, sizeof(xgi_devices)); + info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024; - if (pci_register_driver(&xgi_pci_driver) < 0) { - pci_unregister_driver(&xgi_pci_driver); - XGI_ERROR("no XGI graphics adapter found\n"); - return -ENODEV; - } + DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n", + (unsigned long) info->fb.base, info->fb.size); - XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, - xgi_devices[xgi_num_devices].fb.base); - 
XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, - xgi_devices[xgi_num_devices].fb.size); -/* Jong 07/27/2006; test for ubuntu */ -/* -#ifdef CONFIG_DEVFS_FS - - XGI_INFO("Jong-Use devfs \n"); - do - { - xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0); - if (xgi_devfs_handles[0] == NULL) - { - result = -ENOMEM; - XGI_ERROR("devfs register failed\n"); - goto failed; - } - } while(0); - #else *//* no devfs, do it the "classic" way */ - - XGI_INFO("Jong-Use non-devfs \n"); - /* - * Register your major, and accept a dynamic number. This is the - * first thing to do, in order to avoid releasing other module's - * fops in scull_cleanup_module() - */ - result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops); - if (result < 0) { - XGI_ERROR("register chrdev failed\n"); - pci_unregister_driver(&xgi_pci_driver); - return result; + if ((info->fb.base == 0) || (info->fb.size == 0)) { + DRM_ERROR("frame buffer appears to be wrong: 0x%lx 0x%x\n", + (unsigned long) info->fb.base, info->fb.size); + return DRM_ERR(EINVAL); } - if (xgi_major == 0) - xgi_major = result; /* dynamic */ - /* #endif *//* CONFIG_DEVFS_FS */ - XGI_INFO("Jong-major number %d\n", xgi_major); - /* instantiate tasklets */ - for (i = 0; i < XGI_MAX_DEVICES; i++) { - /* - * We keep one tasklet per card to avoid latency issues with more - * than one device; no two instances of a single tasklet are ever - * executed concurrently. 
- */ - XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1); + xgi_mem_block_cache = kmem_cache_create("xgi_mem_block", + sizeof(struct xgi_mem_block), + 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (xgi_mem_block_cache == NULL) { + return DRM_ERR(ENOMEM); } - /* init the xgi control device */ - { - struct xgi_info *info_ctl = &xgi_ctl_device; - xgi_lock_init(info_ctl); - } /* Init the resource manager */ - INIT_LIST_HEAD(&xgi_mempid_list); - if (!xgi_fb_heap_init(info)) { - XGI_ERROR("xgi_fb_heap_init() failed\n"); - result = -EIO; - goto failed; + err = xgi_fb_heap_init(info); + if (err) { + DRM_ERROR("xgi_fb_heap_init() failed\n"); + return err; } - /* Init the resource manager */ - if (!xgi_pcie_heap_init(info)) { - XGI_ERROR("xgi_pcie_heap_init() failed\n"); - result = -EIO; - goto failed; - } - - /* create /proc/driver/xgi */ - xgi_proc_create(); - -#if defined(DEBUG) - inter_module_register("xgi_devices", THIS_MODULE, xgi_devices); -#endif - return 0; - - failed: -#ifdef CONFIG_DEVFS_FS - XGI_DEVFS_REMOVE_CONTROL(); - XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); -#endif - - if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) - XGI_ERROR("unregister xgi chrdev failed\n"); - - for (i = 0; i < xgi_num_devices; i++) { - if (xgi_devices[i].dev) { - release_mem_region(xgi_devices[i].fb.base, - xgi_devices[i].fb.size); - release_mem_region(xgi_devices[i].mmio.base, - xgi_devices[i].mmio.size); - } - } - - pci_unregister_driver(&xgi_pci_driver); - return result; - - return 1; } -void __exit xgi_exit_module(void) +int xgi_driver_unload(struct drm_device *dev) { - int i; - -#ifdef CONFIG_DEVFS_FS - XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); -#endif - - if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) - XGI_ERROR("unregister xgi chrdev failed\n"); - - XGI_INFO("Jong-unregister xgi chrdev scceeded\n"); - for (i = 0; i < XGI_MAX_DEVICES; i++) { - if (xgi_devices[i].dev) { - /* clean up the flush2D batch array */ - xgi_cmdlist_cleanup(&xgi_devices[i]); - - if (xgi_devices[i].fb.vbase 
!= NULL) { - iounmap(xgi_devices[i].fb.vbase); - xgi_devices[i].fb.vbase = NULL; - } - if (xgi_devices[i].mmio.vbase != NULL) { - iounmap(xgi_devices[i].mmio.vbase); - xgi_devices[i].mmio.vbase = NULL; - } - //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); - //XGI_INFO("release frame buffer mem region scceeded\n"); - - release_mem_region(xgi_devices[i].mmio.base, - xgi_devices[i].mmio.size); - XGI_INFO("release MMIO mem region scceeded\n"); - - xgi_fb_heap_cleanup(&xgi_devices[i]); - XGI_INFO("xgi_fb_heap_cleanup scceeded\n"); - - xgi_pcie_heap_cleanup(&xgi_devices[i]); - XGI_INFO("xgi_pcie_heap_cleanup scceeded\n"); - - XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev); - } - } - - pci_unregister_driver(&xgi_pci_driver); - - /* remove /proc/driver/xgi */ - xgi_proc_remove(); + struct xgi_info * info = dev->dev_private; -#if defined(DEBUG) - inter_module_unregister("xgi_devices"); -#endif -} + xgi_cmdlist_cleanup(info); + if (info->fb_map != NULL) { + drm_rmmap(info->dev, info->fb_map); + } -module_init(xgi_init_module); -module_exit(xgi_exit_module); + if (info->mmio_map != NULL) { + drm_rmmap(info->dev, info->mmio_map); + } -#if defined(XGI_PM_SUPPORT_ACPI) -int xgi_acpi_event(struct pci_dev *dev, u32 state) -{ - return 1; -} + xgi_mem_heap_cleanup(&info->fb_heap); + xgi_mem_heap_cleanup(&info->pcie_heap); + xgi_pcie_lut_cleanup(info); -int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state) -{ - return 1; -} + if (xgi_mem_block_cache) { + kmem_cache_destroy(xgi_mem_block_cache); + xgi_mem_block_cache = NULL; + } -int xgi_kern_acpi_resume(struct pci_dev *dev) -{ - return 1; + return 0; } -#endif - -MODULE_AUTHOR("Andrea Zhang "); -MODULE_DESCRIPTION("xgi kernel driver for xgi cards"); -MODULE_LICENSE("GPL"); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 382bb7a6..20965876 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -29,115 +29,69 @@ #ifndef _XGI_DRV_H_ #define _XGI_DRV_H_ -#include "xgi_drm.h" - -#define 
XGI_MAJOR_VERSION 0 -#define XGI_MINOR_VERSION 7 -#define XGI_PATCHLEVEL 5 - -#define XGI_DRV_VERSION "0.7.5" - -#ifndef XGI_DRV_NAME -#define XGI_DRV_NAME "xgi" -#endif - -/* - * xgi reserved major device number, Set this to 0 to - * request dynamic major number allocation. - */ -#ifndef XGI_DEV_MAJOR -#define XGI_DEV_MAJOR 0 -#endif - -#ifndef XGI_MAX_DEVICES -#define XGI_MAX_DEVICES 1 -#endif - -/* Jong 06/06/2006 */ -/* #define XGI_DEBUG */ +#include "drmP.h" +#include "drm.h" -#ifndef PCI_VENDOR_ID_XGI -/* -#define PCI_VENDOR_ID_XGI 0x1023 -*/ -#define PCI_VENDOR_ID_XGI 0x18CA +#define DRIVER_AUTHOR "Andrea Zhang " -#endif - -#ifndef PCI_DEVICE_ID_XP5 -#define PCI_DEVICE_ID_XP5 0x2200 -#endif - -#ifndef PCI_DEVICE_ID_XG47 -#define PCI_DEVICE_ID_XG47 0x0047 -#endif +#define DRIVER_NAME "xgi" +#define DRIVER_DESC "XGI XP5 / XP10 / XG47" +#define DRIVER_DATE "20070710" -/* Macros to make printk easier */ -#define XGI_ERROR(fmt, arg...) \ - printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 8 +#define DRIVER_PATCHLEVEL 0 -#define XGI_MEM_ERROR(area, fmt, arg...) \ - printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) +#include "xgi_drm.h" -/* #define XGI_DEBUG */ +struct xgi_aperture { + dma_addr_t base; + unsigned int size; +}; -#ifdef XGI_DEBUG -#define XGI_INFO(fmt, arg...) \ - printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) -/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */ -#else -#define XGI_INFO(fmt, arg...) 
do { } while (0) -#endif +struct xgi_mem_block { + struct list_head list; + unsigned long offset; + unsigned long size; + DRMFILE filp; -/* device name length; must be atleast 8 */ -#define XGI_DEVICE_NAME_LENGTH 40 + unsigned int owner; +}; -/* need a fake device number for control device; just to flag it for msgs */ -#define XGI_CONTROL_DEVICE_NUMBER 100 +struct xgi_mem_heap { + struct list_head free_list; + struct list_head used_list; + struct list_head sort_list; + unsigned long max_freesize; -struct xgi_aperture { - unsigned long base; - unsigned int size; - void *vbase; + bool initialized; }; struct xgi_info { - struct pci_dev *dev; - int flags; - int device_number; + struct drm_device *dev; + + bool bootstrap_done; /* physical characteristics */ struct xgi_aperture mmio; struct xgi_aperture fb; struct xgi_aperture pcie; + struct drm_map *mmio_map; + struct drm_map *pcie_map; + struct drm_map *fb_map; + /* look up table parameters */ - u32 *lut_base; + struct drm_dma_handle *lut_handle; unsigned int lutPageSize; - unsigned int lutPageOrder; - bool isLUTInLFB; - unsigned int sdfbPageSize; - - u32 pcie_config; - u32 pcie_status; - - atomic_t use_count; - /* keep track of any pending bottom halfes */ - struct tasklet_struct tasklet; + struct xgi_mem_heap fb_heap; + struct xgi_mem_heap pcie_heap; - spinlock_t info_lock; - - struct semaphore info_sem; struct semaphore fb_sem; struct semaphore pcie_sem; }; -struct xgi_ioctl_post_vbios { - unsigned int bus; - unsigned int slot; -}; - enum PcieOwner { PCIE_2D = 0, /* @@ -151,64 +105,47 @@ enum PcieOwner { PCIE_INVALID = 0x7fffffff }; -struct xgi_mem_pid { - struct list_head list; - enum xgi_mem_location location; - unsigned long bus_addr; - unsigned long pid; -}; - - -/* - * flags - */ -#define XGI_FLAG_OPEN 0x0001 -#define XGI_FLAG_NEEDS_POSTING 0x0002 -#define XGI_FLAG_WAS_POSTED 0x0004 -#define XGI_FLAG_CONTROL 0x0010 -#define XGI_FLAG_MAP_REGS_EARLY 0x0200 - -/* mmap(2) offsets */ - -#define IS_IO_OFFSET(info, 
offset, length) \ - (((offset) >= (info)->mmio.base) \ - && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size)) - -/* Jong 06/14/2006 */ -/* (info)->fb.base is a base address for physical (bus) address space */ -/* what's the definition of offest? on physical (bus) address space or HW address space */ -/* Jong 06/15/2006; use HW address space */ -#define IS_FB_OFFSET(info, offset, length) \ - (((offset) >= 0) \ - && (((offset) + (length)) <= (info)->fb.size)) -#if 0 -#define IS_FB_OFFSET(info, offset, length) \ - (((offset) >= (info)->fb.base) \ - && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size)) -#endif -#define IS_PCIE_OFFSET(info, offset, length) \ - (((offset) >= (info)->pcie.base) \ - && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) +extern struct kmem_cache *xgi_mem_block_cache; +extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, + unsigned long size, enum PcieOwner owner); +extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + DRMFILE filp); +extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, + unsigned int end); +extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); extern int xgi_fb_heap_init(struct xgi_info * info); -extern void xgi_fb_heap_cleanup(struct xgi_info * info); -extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - pid_t pid); -extern void xgi_fb_free(struct xgi_info * info, unsigned long offset); -extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt); +extern int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + DRMFILE filp); + +extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, + DRMFILE filp); extern int xgi_pcie_heap_init(struct xgi_info * info); -extern void xgi_pcie_heap_cleanup(struct xgi_info * info); +extern void xgi_pcie_lut_cleanup(struct xgi_info * info); + +extern int xgi_pcie_alloc(struct xgi_info * info, + struct 
xgi_mem_alloc * alloc, DRMFILE filp); + +extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, + DRMFILE filp); + +extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); -extern void xgi_pcie_alloc(struct xgi_info * info, - struct xgi_mem_alloc * alloc, pid_t pid); -extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset); -extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, - unsigned long address); -extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address); +extern void xgi_pcie_free_all(struct xgi_info *, DRMFILE); +extern void xgi_fb_free_all(struct xgi_info *, DRMFILE); -extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address); +extern int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS); +extern int xgi_fb_free_ioctl(DRM_IOCTL_ARGS); +extern int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS); +extern int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS); +extern int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS); +extern int xgi_dump_register_ioctl(DRM_IOCTL_ARGS); +extern int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS); +extern int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS); +extern int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS); +extern int xgi_state_change_ioctl(DRM_IOCTL_ARGS); #endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 7d390d4b..ce689847 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -26,343 +26,126 @@ * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ -#include "xgi_linux.h" #include "xgi_drv.h" -#include "xgi_fb.h" #define XGI_FB_HEAP_START 0x1000000 -static struct xgi_mem_heap *xgi_fb_heap; -static struct kmem_cache *xgi_fb_cache_block = NULL; -extern struct list_head xgi_mempid_list; +struct kmem_cache *xgi_mem_block_cache = NULL; static struct xgi_mem_block *xgi_mem_new_node(void); -static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size); -static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset); -void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - pid_t pid) -{ - struct xgi_mem_block *block; - struct xgi_mem_pid *mempid_block; - - if (alloc->is_front) { - alloc->location = XGI_MEMLOC_LOCAL; - alloc->bus_addr = info->fb.base; - alloc->hw_addr = 0; - XGI_INFO - ("Video RAM allocation on front buffer successfully! \n"); - } else { - xgi_down(info->fb_sem); - block = xgi_mem_alloc(info, alloc->size); - xgi_up(info->fb_sem); - - if (block == NULL) { - alloc->location = XGI_MEMLOC_LOCAL; - alloc->size = 0; - alloc->bus_addr = 0; - alloc->hw_addr = 0; - XGI_ERROR("Video RAM allocation failed\n"); - } else { - XGI_INFO("Video RAM allocation succeeded: 0x%p\n", - (char *)block->offset); - alloc->location = XGI_MEMLOC_LOCAL; - alloc->size = block->size; - alloc->bus_addr = info->fb.base + block->offset; - alloc->hw_addr = block->offset; - - /* manage mempid */ - mempid_block = - kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); - mempid_block->location = XGI_MEMLOC_LOCAL; - mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = pid; - - if (!mempid_block) - XGI_ERROR("mempid_block alloc failed\n"); - - XGI_INFO - ("Memory ProcessID add one fb block pid:%ld successfully! 
\n", - mempid_block->pid); - list_add(&mempid_block->list, &xgi_mempid_list); - } - } -} -void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr) +int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start, + unsigned int end) { struct xgi_mem_block *block; - unsigned long offset = bus_addr - info->fb.base; - struct xgi_mem_pid *mempid_block; - struct xgi_mem_pid *mempid_freeblock = NULL; - - if (offset < 0) { - XGI_INFO("free onscreen frame buffer successfully !\n"); - } else { - xgi_down(info->fb_sem); - block = xgi_mem_free(info, offset); - xgi_up(info->fb_sem); - - if (block == NULL) { - XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", - offset); - } - - /* manage mempid */ - list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == XGI_MEMLOC_LOCAL - && mempid_block->bus_addr == bus_addr) { - mempid_freeblock = mempid_block; - break; - } - } - if (mempid_freeblock) { - list_del(&mempid_freeblock->list); - XGI_INFO - ("Memory ProcessID delete one fb block pid:%ld successfully! 
\n", - mempid_freeblock->pid); - kfree(mempid_freeblock); - } - } -} - -int xgi_fb_heap_init(struct xgi_info * info) -{ - struct xgi_mem_block *block; - - xgi_fb_heap = kmalloc(sizeof(struct xgi_mem_heap), GFP_KERNEL); - if (!xgi_fb_heap) { - XGI_ERROR("xgi_fb_heap alloc failed\n"); - return 0; - } - - INIT_LIST_HEAD(&xgi_fb_heap->free_list); - INIT_LIST_HEAD(&xgi_fb_heap->used_list); - INIT_LIST_HEAD(&xgi_fb_heap->sort_list); - xgi_fb_cache_block = - kmem_cache_create("xgi_fb_block", sizeof(struct xgi_mem_block), 0, - SLAB_HWCACHE_ALIGN, NULL, NULL); + INIT_LIST_HEAD(&heap->free_list); + INIT_LIST_HEAD(&heap->used_list); + INIT_LIST_HEAD(&heap->sort_list); + heap->initialized = TRUE; - if (NULL == xgi_fb_cache_block) { - XGI_ERROR("Fail to creat xgi_fb_block\n"); - goto fail1; - } - - block = - (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block, - GFP_KERNEL); + block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); if (!block) { - XGI_ERROR("kmem_cache_alloc failed\n"); - goto fail2; + return DRM_ERR(ENOMEM); } - block->offset = XGI_FB_HEAP_START; - block->size = info->fb.size - XGI_FB_HEAP_START; - - list_add(&block->list, &xgi_fb_heap->free_list); + block->offset = start; + block->size = end - start; - xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START; + list_add(&block->list, &heap->free_list); - XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, - block->size); - XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", - xgi_fb_heap->max_freesize); + heap->max_freesize = end - start; - return 1; - - fail2: - if (xgi_fb_cache_block) { - kmem_cache_destroy(xgi_fb_cache_block); - xgi_fb_cache_block = NULL; - } - fail1: - if (xgi_fb_heap) { - kfree(xgi_fb_heap); - xgi_fb_heap = NULL; - } return 0; } -void xgi_fb_heap_cleanup(struct xgi_info * info) + +void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap) { struct list_head *free_list; struct xgi_mem_block *block; struct xgi_mem_block *next; int i; - if (xgi_fb_heap) { - 
free_list = &xgi_fb_heap->free_list; - for (i = 0; i < 3; i++, free_list++) { - list_for_each_entry_safe(block, next, free_list, list) { - XGI_INFO - ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", - i, block->offset, block->size); - //XGI_INFO("No. %d free block: 0x%p \n", i, block); - kmem_cache_free(xgi_fb_cache_block, block); - block = NULL; - } + free_list = &heap->free_list; + for (i = 0; i < 3; i++, free_list++) { + list_for_each_entry_safe(block, next, free_list, list) { + DRM_INFO + ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", + i, block->offset, block->size); + kmem_cache_free(xgi_mem_block_cache, block); + block = NULL; } - XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap); - kfree(xgi_fb_heap); - xgi_fb_heap = NULL; - } - - if (xgi_fb_cache_block) { - kmem_cache_destroy(xgi_fb_cache_block); - xgi_fb_cache_block = NULL; } + + heap->initialized = 0; } -static struct xgi_mem_block *xgi_mem_new_node(void) + +struct xgi_mem_block *xgi_mem_new_node(void) { - struct xgi_mem_block *block; + struct xgi_mem_block *block = + kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); - block = - (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block, - GFP_KERNEL); if (!block) { - XGI_ERROR("kmem_cache_alloc failed\n"); + DRM_ERROR("kmem_cache_alloc failed\n"); return NULL; } - return block; -} - -#if 0 -static void xgi_mem_insert_node_after(struct xgi_mem_list * list, - struct xgi_mem_block * current, - struct xgi_mem_block * block); -static void xgi_mem_insert_node_before(struct xgi_mem_list * list, - struct xgi_mem_block * current, - struct xgi_mem_block * block); -static void xgi_mem_insert_node_head(struct xgi_mem_list * list, - struct xgi_mem_block * block); -static void xgi_mem_insert_node_tail(struct xgi_mem_list * list, - struct xgi_mem_block * block); -static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block); -/* - * insert node:block after node:current - */ -static void xgi_mem_insert_node_after(struct 
xgi_mem_list * list, - struct xgi_mem_block * current, - struct xgi_mem_block * block) -{ - block->prev = current; - block->next = current->next; - current->next = block; - - if (current == list->tail) { - list->tail = block; - } else { - block->next->prev = block; - } -} + block->offset = 0; + block->size = 0; + block->owner = PCIE_INVALID; + block->filp = (DRMFILE) -1; -/* - * insert node:block before node:current - */ -static void xgi_mem_insert_node_before(struct xgi_mem_list * list, - struct xgi_mem_block * current, - struct xgi_mem_block * block) -{ - block->prev = current->prev; - block->next = current; - current->prev = block; - if (current == list->head) { - list->head = block; - } else { - block->prev->next = block; - } -} -void xgi_mem_insert_node_head(struct xgi_mem_list * list, struct xgi_mem_block * block) -{ - block->next = list->head; - block->prev = NULL; - - if (NULL == list->head) { - list->tail = block; - } else { - list->head->prev = block; - } - list->head = block; -} - -static void xgi_mem_insert_node_tail(struct xgi_mem_list * list, - struct xgi_mem_block * block) -{ - block->next = NULL; - block->prev = list->tail; - if (NULL == list->tail) { - list->head = block; - } else { - list->tail->next = block; - } - list->tail = block; + return block; } -static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block) -{ - if (block == list->head) { - list->head = block->next; - } - if (block == list->tail) { - list->tail = block->prev; - } - - if (block->prev) { - block->prev->next = block->next; - } - if (block->next) { - block->next->prev = block->prev; - } - block->next = block->prev = NULL; -} -#endif -static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, - unsigned long originalSize) +struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, + unsigned long originalSize, + enum PcieOwner owner) { struct xgi_mem_block *block, *free_block, *used_block; - unsigned long size = (originalSize + PAGE_SIZE 
- 1) & PAGE_MASK; - XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", + + DRM_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size); if (size == 0) { - XGI_ERROR("size == 0\n"); + DRM_ERROR("size == 0\n"); return (NULL); } - XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); - if (size > xgi_fb_heap->max_freesize) { - XGI_ERROR + DRM_INFO("max_freesize: 0x%lx \n", heap->max_freesize); + if (size > heap->max_freesize) { + DRM_ERROR ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", - size, xgi_fb_heap->max_freesize); + size, heap->max_freesize); return (NULL); } - list_for_each_entry(block, &xgi_fb_heap->free_list, list) { - XGI_INFO("free_list: 0x%px \n", free_list); + list_for_each_entry(block, &heap->free_list, list) { + DRM_INFO("block: 0x%px \n", block); if (size <= block->size) { break; } } - if (&block->list == &xgi_fb_heap->free_list) { - XGI_ERROR + if (&block->list == &heap->free_list) { + DRM_ERROR ("Can't allocate %ldk size from frame buffer memory !\n", size / 1024); return (NULL); } free_block = block; - XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", + DRM_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", size, free_block->offset, free_block->size); if (size == free_block->size) { used_block = free_block; - XGI_INFO("size == free_block->size: free_block = 0x%p\n", + DRM_INFO("size == free_block->size: free_block = 0x%p\n", free_block); list_del(&free_block->list); } else { @@ -372,7 +155,7 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, return (NULL); if (used_block == free_block) { - XGI_ERROR("used_block == free_block = 0x%p\n", + DRM_ERROR("used_block == free_block = 0x%p\n", used_block); } @@ -383,14 +166,16 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, free_block->size -= size; } - xgi_fb_heap->max_freesize -= size; + heap->max_freesize -= size; - list_add(&used_block->list, 
&xgi_fb_heap->used_list); + list_add(&used_block->list, &heap->used_list); + used_block->owner = owner; return (used_block); } -static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset) +int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + DRMFILE filp) { struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; @@ -398,28 +183,32 @@ static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long unsigned long upper; unsigned long lower; - list_for_each_entry(block, &xgi_fb_heap->used_list, list) { + list_for_each_entry(block, &heap->used_list, list) { if (block->offset == offset) { break; } } - if (&block->list == &xgi_fb_heap->used_list) { - XGI_ERROR("can't find block: 0x%lx to free!\n", offset); - return (NULL); + if (&block->list == &heap->used_list) { + DRM_ERROR("can't find block: 0x%lx to free!\n", offset); + return DRM_ERR(ENOENT); + } + + if (block->filp != filp) { + return DRM_ERR(EPERM); } used_block = block; - XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", + DRM_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", used_block, used_block->offset, used_block->size); - xgi_fb_heap->max_freesize += used_block->size; + heap->max_freesize += used_block->size; prev = next = NULL; upper = used_block->offset + used_block->size; lower = used_block->offset; - list_for_each_entry(block, &xgi_fb_heap->free_list, list) { + list_for_each_entry(block, &heap->free_list, list) { if (block->offset == upper) { next = block; } else if ((block->offset + block->size) == lower) { @@ -427,41 +216,157 @@ static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long } } - XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); + DRM_INFO("next = 0x%p, prev = 0x%p\n", next, prev); list_del(&used_block->list); if (prev && next) { prev->size += (used_block->size + next->size); list_del(&next->list); - XGI_INFO("free node 0x%p\n", next); - 
kmem_cache_free(xgi_fb_cache_block, next); - kmem_cache_free(xgi_fb_cache_block, used_block); - - next = NULL; - used_block = NULL; - return (prev); + DRM_INFO("free node 0x%p\n", next); + kmem_cache_free(xgi_mem_block_cache, next); + kmem_cache_free(xgi_mem_block_cache, used_block); } - - if (prev) { + else if (prev) { prev->size += used_block->size; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_fb_cache_block, used_block); - used_block = NULL; - return (prev); + DRM_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_mem_block_cache, used_block); } - - if (next) { + else if (next) { next->size += used_block->size; next->offset = used_block->offset; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_fb_cache_block, used_block); - used_block = NULL; - return (next); + DRM_INFO("free node 0x%p\n", used_block); + kmem_cache_free(xgi_mem_block_cache, used_block); + } + else { + list_add(&used_block->list, &heap->free_list); + DRM_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", + used_block, used_block->offset, used_block->size); } - list_add(&used_block->list, &xgi_fb_heap->free_list); - XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); + return 0; +} - return (used_block); + +int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + DRMFILE filp) +{ + struct xgi_mem_block *block; + + if (alloc->is_front) { + alloc->location = XGI_MEMLOC_LOCAL; + alloc->offset = 0; + alloc->hw_addr = 0; + DRM_INFO + ("Video RAM allocation on front buffer successfully! 
\n"); + } else { + down(&info->fb_sem); + block = xgi_mem_alloc(&info->fb_heap, alloc->size, PCIE_2D); + up(&info->fb_sem); + + if (block == NULL) { + alloc->location = XGI_MEMLOC_LOCAL; + alloc->size = 0; + DRM_ERROR("Video RAM allocation failed\n"); + return DRM_ERR(ENOMEM); + } else { + DRM_INFO("Video RAM allocation succeeded: 0x%p\n", + (char *)block->offset); + alloc->location = XGI_MEMLOC_LOCAL; + alloc->size = block->size; + alloc->offset = block->offset; + alloc->hw_addr = block->offset; + + block->filp = filp; + } + } + + return 0; +} + + +int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_mem_alloc alloc; + struct xgi_info *info = dev->dev_private; + int err; + + DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data, + sizeof(alloc)); + + err = xgi_fb_alloc(info, & alloc, filp); + if (err) { + return err; + } + + DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data, + alloc, sizeof(alloc)); + + return 0; +} + + +int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +{ + int err = 0; + + if (offset == 0) { + DRM_INFO("free onscreen frame buffer successfully !\n"); + } else { + down(&info->fb_sem); + err = xgi_mem_free(&info->fb_heap, offset, filp); + up(&info->fb_sem); + } + + return err; +} + + +int xgi_fb_free_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + u32 offset; + + DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data, + sizeof(offset)); + + return xgi_fb_free(info, offset, filp); +} + + +int xgi_fb_heap_init(struct xgi_info * info) +{ + return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START, + info->fb.size); +} + +/** + * Free all blocks associated with a particular file handle. 
+ */ +void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp) +{ + if (!info->fb_heap.initialized) { + return; + } + + down(&info->fb_sem); + + do { + struct xgi_mem_block *block; + + list_for_each_entry(block, &info->fb_heap.used_list, list) { + if (block->filp == filp) { + break; + } + } + + if (&block->list == &info->fb_heap.used_list) { + break; + } + + (void) xgi_fb_free(info, block->offset, filp); + } while(1); + + up(&info->fb_sem); } diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h deleted file mode 100644 index 363c8bc8..00000000 --- a/linux-core/xgi_fb.h +++ /dev/null @@ -1,47 +0,0 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - -#ifndef _XGI_FB_H_ -#define _XGI_FB_H_ - -struct xgi_mem_block { - struct list_head list; - unsigned long offset; - unsigned long size; - atomic_t use_count; -}; - -struct xgi_mem_heap { - struct list_head free_list; - struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; - spinlock_t lock; -}; - -#endif diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h deleted file mode 100644 index 99bf2d04..00000000 --- a/linux-core/xgi_linux.h +++ /dev/null @@ -1,490 +0,0 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - -#ifndef _XGI_LINUX_H_ -#define _XGI_LINUX_H_ - -#ifndef LINUX_VERSION_CODE -#include -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) -# error "This driver does not support pre-2.6 kernels!" -#endif - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10) -# define XGI_REMAP_PFN_RANGE_PRESENT -#else -# define XGI_REMAP_PAGE_RANGE_5 -#endif - -#if defined (CONFIG_SMP) && !defined (__SMP__) -#define __SMP__ -#endif - -#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) -#define MODVERSIONS -#endif - -#include /* printk */ -#include - -#include /* module_init, module_exit */ -#include /* pic_t, size_t, __u32, etc */ -#include /* error codes */ -#include /* circular linked list */ -#include /* NULL, offsetof */ -#include /* wait queues */ - -#include /* kmalloc, kfree, etc */ -#include /* vmalloc, vfree, etc */ - -#include /* poll_wait */ -#include /* mdelay, udelay */ -#include /* rdtsc rdtscl */ - -#include /* suser(), capable() replacement - for_each_task, for_each_process */ -#ifdef for_each_process -#define XGI_SCAN_PROCESS(p) for_each_process(p) -#else -#define XGI_SCAN_PROCESS(p) for_each_task(p) -#endif - -#include /* module_param() */ -#include /* kernel_locked */ -#include /* flush_tlb(), flush_tlb_all() */ -#include /* page table entry lookup */ - -#include /* pci_find_class, etc */ -#include /* tasklets, interrupt helpers */ -#include - -#include /* cli, sli, save_flags */ -#include /* ioremap, virt_to_phys */ -#include /* access_ok */ -#include /* PAGE_OFFSET */ -#include /* pte bit definitions */ - -#include -#include -#include - -#ifdef CONFIG_PROC_FS -#include -#endif - -#ifdef CONFIG_DEVFS_FS -#include -#endif - -#ifdef CONFIG_KMOD -#include -#endif - -#ifdef CONFIG_PM -#include -#endif - -#ifdef CONFIG_MTRR -#include -#endif - -#ifdef CONFIG_KDB -#include -#include -#endif - -#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) -#define AGPGART 
-#include -#include -#endif - -#ifndef MAX_ORDER -#define MAX_ORDER 11 -#endif - -#ifndef module_init -#define module_init(x) int init_module(void) { return x(); } -#define module_exit(x) void cleanup_module(void) { x(); } -#endif - -#ifndef minor -#define minor(x) MINOR(x) -#endif - -#ifndef IRQ_HANDLED -typedef void irqreturn_t; -#define IRQ_NONE -#define IRQ_HANDLED -#define IRQ_RETVAL(x) -#endif - -#if !defined (list_for_each) -#define list_for_each(pos, head) \ - for (pos = (head)->next, prefetch(pos->next); pos != (head); \ - pos = pos->next, prefetch(pos->next)) -#endif - -extern struct list_head pci_devices; /* list of all devices */ -#define XGI_PCI_FOR_EACH_DEV(dev) \ - for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) - -/* - * the following macro causes problems when used in the same module - * as module_param(); undef it so we don't accidentally mix the two - */ -#undef MODULE_PARM - -#ifdef EXPORT_NO_SYMBOLS -EXPORT_NO_SYMBOLS; -#endif - -#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN) -#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name) -#define XGI_NUM_CPUS() num_online_cpus() -#define XGI_CLI() local_irq_disable() -#define XGI_SAVE_FLAGS(eflags) local_save_flags(eflags) -#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags) -#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic()) -#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0) - - -#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev) - -/* common defines */ -#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym) -#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym) - -#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) -#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT) -#define XGI_VMA_PRIVATE(vma) ((vma)->vm_private_data) - -#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev) -#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255) - -#define 
XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start) -#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1) - -#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number -#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn) - -#define XGI_PCI_GET_CLASS_PRESENT -#ifdef XGI_PCI_GET_CLASS_PRESENT -#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev) -#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from) -#else -#define XGI_PCI_DEV_PUT(dev) -#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from) -#endif - -/* - * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver - * model is not sufficient for full acpi support. it may work in some cases, - * but not enough for us to officially support this configuration. - */ -#if defined(CONFIG_ACPI) -#define XGI_PM_SUPPORT_ACPI -#endif - -#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) -#define XGI_PM_SUPPORT_APM -#endif - -#if defined(CONFIG_DEVFS_FS) -typedef void *devfs_handle_t; -#define XGI_DEVFS_REGISTER(_name, _minor) \ - ({ \ - devfs_handle_t __handle = NULL; \ - if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \ - S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \ - { \ - __handle = (void *) 1; /* XXX Fix me! (boolean) */ \ - } \ - __handle; \ - }) -/* -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i) -*/ -#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") -#endif /* defined(CONFIG_DEVFS_FS) */ - -#define XGI_REGISTER_CHRDEV(x...) register_chrdev(x) -#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x) - -#if defined(XGI_REMAP_PFN_RANGE_PRESENT) -#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ - remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x) -#elif defined(XGI_REMAP_PAGE_RANGE_5) -#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) -#elif defined(XGI_REMAP_PAGE_RANGE_4) -#define XGI_REMAP_PAGE_RANGE(x...) 
remap_page_range(x) -#else -#warning "xgi_configure.sh failed, assuming remap_page_range(5)!" -#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x) -#endif - -#if defined(pmd_offset_map) -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ - { \ - pg_mid_dir = pmd_offset_map(pg_dir, address); \ - } -#define XGI_PMD_UNMAP(pg_mid_dir) \ - { \ - pmd_unmap(pg_mid_dir); \ - } -#else -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ - { \ - pg_mid_dir = pmd_offset(pg_dir, address); \ - } -#define XGI_PMD_UNMAP(pg_mid_dir) -#endif - -#define XGI_PMD_PRESENT(pg_mid_dir) \ - ({ \ - if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \ - { \ - XGI_PMD_UNMAP(pg_mid_dir); \ - pg_mid_dir = NULL; \ - } \ - pg_mid_dir != NULL; \ - }) - -#if defined(pte_offset_atomic) -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset_atomic(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) \ - { \ - pte_kunmap(pte); \ - } -#elif defined(pte_offset) -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) -#else -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ - { \ - pte = pte_offset_map(pg_mid_dir, address); \ - XGI_PMD_UNMAP(pg_mid_dir); \ - } -#define XGI_PTE_UNMAP(pte) \ - { \ - pte_unmap(pte); \ - } -#endif - -#define XGI_PTE_PRESENT(pte) \ - ({ \ - if (pte) \ - { \ - if (!pte_present(*pte)) \ - { \ - XGI_PTE_UNMAP(pte); pte = NULL; \ - } \ - } \ - pte != NULL; \ - }) - -#define XGI_PTE_VALUE(pte) \ - ({ \ - unsigned long __pte_value = pte_val(*pte); \ - XGI_PTE_UNMAP(pte); \ - __pte_value; \ - }) - -#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE) -#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1)) - -#if !defined (pgprot_noncached) -static inline pgprot_t pgprot_noncached(pgprot_t old_prot) -{ - pgprot_t new_prot = old_prot; - if (boot_cpu_data.x86 > 3) - new_prot = 
__pgprot(pgprot_val(old_prot) | _PAGE_PCD); - return new_prot; -} -#endif - -#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined) -/* Added define for write combining page, only valid if pat enabled. */ -#define _PAGE_WRTCOMB _PAGE_PWT -#define __PAGE_KERNEL_WRTCOMB \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED) -#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB) - -static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) -{ - pgprot_t new_prot = old_prot; - if (boot_cpu_data.x86 > 3) { - pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT); - new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB); - } - return new_prot; -} -#endif - -#if !defined(page_to_pfn) -#define page_to_pfn(page) ((page) - mem_map) -#endif - -#define XGI_VMALLOC(ptr, size) \ - { \ - (ptr) = vmalloc_32(size); \ - } - -#define XGI_VFREE(ptr, size) \ - { \ - vfree((void *) (ptr)); \ - } - -#define XGI_IOREMAP(ptr, physaddr, size) \ - { \ - (ptr) = ioremap(physaddr, size); \ - } - -#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \ - { \ - (ptr) = ioremap_nocache(physaddr, size); \ - } - -#define XGI_IOUNMAP(ptr, size) \ - { \ - iounmap(ptr); \ - } - -/* - * only use this because GFP_KERNEL may sleep.. - * GFP_ATOMIC is ok, it won't sleep - */ -#define XGI_KMALLOC(ptr, size) \ - { \ - (ptr) = kmalloc(size, GFP_KERNEL); \ - } - -#define XGI_KMALLOC_ATOMIC(ptr, size) \ - { \ - (ptr) = kmalloc(size, GFP_ATOMIC); \ - } - -#define XGI_KFREE(ptr, size) \ - { \ - kfree((void *) (ptr)); \ - } - -#define XGI_GET_FREE_PAGES(ptr, order) \ - { \ - (ptr) = __get_free_pages(GFP_KERNEL, order); \ - } - -#define XGI_FREE_PAGES(ptr, order) \ - { \ - free_pages(ptr, order); \ - } - -struct xgi_pte { - unsigned long phys_addr; - unsigned long virt_addr; -}; - -/* - * AMD Athlon processors expose a subtle bug in the Linux - * kernel, that may lead to AGP memory corruption. 
Recent - * kernel versions had a workaround for this problem, but - * 2.4.20 is the first kernel to address it properly. The - * page_attr API provides the means to solve the problem. - */ -static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(struct xgi_pte * page_ptr) -{ - struct page *page = virt_to_page(__va(page_ptr->phys_addr)); - change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); -} -static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr) -{ - struct page *page = virt_to_page(__va(page_ptr->phys_addr)); - change_page_attr(page, 1, PAGE_KERNEL); -} - -/* add for SUSE 9, Jill*/ -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v) -#else -#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count) -#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count) -#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count) -#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v) -#endif -#define XGILockPage(page) SetPageLocked(page) -#define XGIUnlockPage(page) ClearPageLocked(page) - -struct xgi_file_private { - struct xgi_info *info; - unsigned int num_events; - spinlock_t fp_lock; - wait_queue_head_t wait_queue; -}; - -#define FILE_PRIVATE(filp) ((filp)->private_data) - -#define XGI_GET_FP(filp) ((struct xgi_file_private *) FILE_PRIVATE(filp)) - -/* for the card devices */ -#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info) - -#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) - -#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val)) -#define XGI_ATOMIC_INC(data) atomic_inc(&(data)) -#define XGI_ATOMIC_DEC(data) atomic_dec(&(data)) -#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data)) -#define XGI_ATOMIC_READ(data) atomic_read(&(data)) - -/* - * lock-related functions that 
should only be called from this file - */ -#define xgi_init_lock(lock) spin_lock_init(&lock) -#define xgi_lock(lock) spin_lock(&lock) -#define xgi_unlock(lock) spin_unlock(&lock) -#define xgi_down(lock) down(&lock) -#define xgi_up(lock) up(&lock) - -#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags) -#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags) - -#endif diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index bb2813ca..7f3d9d6e 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -26,17 +26,21 @@ * DEALINGS IN THE SOFTWARE. ***************************************************************************/ -#include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" -#include "xgi_pcie.h" -void xgi_ge_reset(struct xgi_info * info) +int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS) { + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + xgi_disable_ge(info); xgi_enable_ge(info); + + return 0; } + /* * irq functions */ @@ -113,7 +117,7 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) u8 old_index; u8 old_36; - XGI_INFO("Can not reset back 0x%x!\n", + DRM_INFO("Can not reset back 0x%x!\n", ge_3d_status[0x00]); *(mmio_vbase + 0xb057) = 0; @@ -151,7 +155,7 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) bool xgi_ge_irq_handler(struct xgi_info * info) { - volatile u8 *const mmio_vbase = info->mmio.vbase; + volatile u8 *const mmio_vbase = info->mmio_map->handle; volatile u32 *const ge_3d_status = (volatile u32 *)(mmio_vbase + 0x2800); const u32 int_status = ge_3d_status[4]; @@ -185,7 +189,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) continue_int_count = 0; /* GE Hung up, need reset. 
*/ - XGI_INFO("Reset GE!\n"); + DRM_INFO("Reset GE!\n"); xgi_ge_hang_reset(mmio_vbase); } @@ -205,23 +209,23 @@ bool xgi_ge_irq_handler(struct xgi_info * info) bool xgi_crt_irq_handler(struct xgi_info * info) { bool ret = FALSE; - u8 save_3ce = bReadReg(0x3ce); + u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); - if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened + if (IN3CFB(info->mmio_map, 0x37) & 0x01) // CRT1 interrupt just happened { u8 op3cf_3d; u8 op3cf_37; // What happened? - op3cf_37 = bIn3cf(0x37); + op3cf_37 = IN3CFB(info->mmio_map, 0x37); // Clear CRT interrupt - op3cf_3d = bIn3cf(0x3d); - bOut3cf(0x3d, (op3cf_3d | 0x04)); - bOut3cf(0x3d, (op3cf_3d & ~0x04)); + op3cf_3d = IN3CFB(info->mmio_map, 0x3d); + OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04)); + OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04)); ret = TRUE; } - bWriteReg(0x3ce, save_3ce); + DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce); return (ret); } @@ -229,36 +233,36 @@ bool xgi_crt_irq_handler(struct xgi_info * info) bool xgi_dvi_irq_handler(struct xgi_info * info) { bool ret = FALSE; - u8 save_3ce = bReadReg(0x3ce); + const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); - if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened - { + if (IN3CFB(info->mmio_map, 0x38) & 0x20) { // DVI interrupt just happened + const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4); u8 op3cf_39; u8 op3cf_37; u8 op3x5_5a; - u8 save_3x4 = bReadReg(0x3d4);; // What happened? 
- op3cf_37 = bIn3cf(0x37); + op3cf_37 = IN3CFB(info->mmio_map, 0x37); //Notify BIOS that DVI plug/unplug happened - op3x5_5a = bIn3x5(0x5a); - bOut3x5(0x5a, op3x5_5a & 0xf7); + op3x5_5a = IN3X5B(info->mmio_map, 0x5a); + OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7); - bWriteReg(0x3d4, save_3x4); + DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4); // Clear DVI interrupt - op3cf_39 = bIn3cf(0x39); - bOut3c5(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 - bOut3c5(0x39, (op3cf_39 | 0x01)); //Set 3cf.39 bit 0 to 1 + op3cf_39 = IN3CFB(info->mmio_map, 0x39); + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01)); //Set 3cf.39 bit 0 to 1 ret = TRUE; } - bWriteReg(0x3ce, save_3ce); + DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce); return (ret); } + void xgi_dump_register(struct xgi_info * info) { int i, j; @@ -281,7 +285,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bIn3c5(i * 0x10 + j); + temp = IN3C5B(info->mmio_map, i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -303,7 +307,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bIn3x5(i * 0x10 + j); + temp = IN3X5B(info->mmio_map, i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -325,7 +329,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bIn3cf(i * 0x10 + j); + temp = IN3CFB(info->mmio_map, i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -346,7 +350,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0xB000 + i * 0x10 + j); + temp = DRM_READ8(info->mmio_map, 0xB000 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -366,7 +370,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0x2200 + i * 0x10 + j); + temp = 
DRM_READ8(info->mmio_map, 0x2200 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -386,7 +390,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0x2300 + i * 0x10 + j); + temp = DRM_READ8(info->mmio_map, 0x2300 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -406,7 +410,7 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0x2400 + i * 0x10 + j); + temp = DRM_READ8(info->mmio_map, 0x2400 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); @@ -426,17 +430,34 @@ void xgi_dump_register(struct xgi_info * info) printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = bReadReg(0x2800 + i * 0x10 + j); + temp = DRM_READ8(info->mmio_map, 0x2800 + i * 0x10 + j); printk("%3x", temp); } printk("\r\n"); } } -void xgi_restore_registers(struct xgi_info * info) + +int xgi_dump_register_ioctl(DRM_IOCTL_ARGS) { - bOut3x5(0x13, 0); - bOut3x5(0x8b, 2); + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + + xgi_dump_register(info); + + return 0; +} + + +int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + + OUT3X5B(info->mmio_map, 0x13, 0); + OUT3X5B(info->mmio_map, 0x8b, 2); + + return 0; } void xgi_waitfor_pci_idle(struct xgi_info * info) @@ -446,60 +467,10 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) int idleCount = 0; while (idleCount < 5) { - if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) { + if (DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) & IDLE_MASK) { idleCount = 0; } else { idleCount++; } } } - - -/*memory collect function*/ -extern struct list_head xgi_mempid_list; -void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt) -{ - struct xgi_mem_pid *block; - struct xgi_mem_pid *next; - struct task_struct *p, *find; - unsigned int cnt = 0; - - list_for_each_entry_safe(block, next, &xgi_mempid_list, list) { - - find = NULL; - XGI_SCAN_PROCESS(p) { - 
if (p->pid == block->pid) { - XGI_INFO - ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", - block->pid, p->state, - block->location, - block->bus_addr); - find = p; - if (block->bus_addr == 0xFFFFFFFF) - ++cnt; - break; - } - } - if (!find) { - if (block->location == XGI_MEMLOC_LOCAL) { - XGI_INFO - ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", - block->pid, block->bus_addr); - xgi_fb_free(info, block->bus_addr); - } else if (block->bus_addr != 0xFFFFFFFF) { - XGI_INFO - ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", - block->pid, block->bus_addr); - xgi_pcie_free(info, block->bus_addr); - } else { - /*only delete the memory block */ - list_del(&block->list); - XGI_INFO - ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", - block->pid); - kfree(block); - } - } - } - *pcnt = cnt; -} diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 9c0591b2..10638b2d 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -30,9 +30,7 @@ #define _XGI_MISC_H_ extern void xgi_dump_register(struct xgi_info * info); -extern void xgi_ge_reset(struct xgi_info * info); -extern void xgi_restore_registers(struct xgi_info * info); extern bool xgi_ge_irq_handler(struct xgi_info * info); extern bool xgi_crt_irq_handler(struct xgi_info * info); extern bool xgi_dvi_irq_handler(struct xgi_info * info); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index cfc9febc..49c531fc 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -26,176 +26,81 @@ * DEALINGS IN THE SOFTWARE. 
***************************************************************************/ -#include "xgi_linux.h" #include "xgi_drv.h" #include "xgi_regs.h" -#include "xgi_pcie.h" #include "xgi_misc.h" -static struct xgi_pcie_heap *xgi_pcie_heap = NULL; -static struct kmem_cache *xgi_pcie_cache_block = NULL; -static struct xgi_pcie_block *xgi_pcie_vertex_block = NULL; -static struct xgi_pcie_block *xgi_pcie_cmdlist_block = NULL; -static struct xgi_pcie_block *xgi_pcie_scratchpad_block = NULL; -extern struct list_head xgi_mempid_list; - -static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) -{ - struct page *page; - unsigned long page_addr = 0; - unsigned long page_count = 0; - int i; - - page_count = (1 << page_order); - page_addr = __get_free_pages(GFP_KERNEL, page_order); - - if (page_addr == 0UL) { - XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n", - page_count); - return 0; - } - - page = virt_to_page(page_addr); - - for (i = 0; i < page_count; i++, page++) { - XGI_INC_PAGE_COUNT(page); - XGILockPage(page); - } - - XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n", - page_count, page_order, page_addr); - return page_addr; -} - -static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) -{ - struct page *page; - unsigned long page_count = 0; - int i; - - page_count = (1 << page_order); - page = virt_to_page(page_addr); - - for (i = 0; i < page_count; i++, page++) { - XGI_DEC_PAGE_COUNT(page); - XGIUnlockPage(page); - } - - free_pages(page_addr, page_order); -} +static struct xgi_mem_block *xgi_pcie_vertex_block = NULL; +static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL; +static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL; static int xgi_pcie_lut_init(struct xgi_info * info) { - unsigned char *page_addr = NULL; - unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; - unsigned long count = 0; u8 temp = 0; + int err; + unsigned i; + struct drm_scatter_gather request; + struct 
drm_sg_mem *sg; + u32 *lut; - /* Jong 06/06/2006 */ - unsigned long pcie_aperture_size; - - info->pcie.size = 128 * 1024 * 1024; /* Get current FB aperture size */ - temp = In3x5(0x27); - XGI_INFO("In3x5(0x27): 0x%x \n", temp); + temp = IN3X5B(info->mmio_map, 0x27); + DRM_INFO("In3x5(0x27): 0x%x \n", temp); if (temp & 0x01) { /* 256MB; Jong 06/05/2006; 0x10000000 */ - /* Jong 06/06/2006; allocate memory */ - pcie_aperture_size = 256 * 1024 * 1024; - /* info->pcie.base = 256 * 1024 * 1024; *//* pcie base is different from fb base */ + info->pcie.base = 256 * 1024 * 1024; } else { /* 128MB; Jong 06/05/2006; 0x08000000 */ - - /* Jong 06/06/2006; allocate memory */ - pcie_aperture_size = 128 * 1024 * 1024; - /* info->pcie.base = 128 * 1024 * 1024; */ + info->pcie.base = 128 * 1024 * 1024; } - /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */ - /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */ - /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */ - /* info->pcie.base=ioremap(0x0F000000, 0x10000000); *//* Cause system hang */ - info->pcie.base = pcie_aperture_size; /* works */ - /* info->pcie.base=info->fb.base + info->fb.size; *//* System hang */ - /* info->pcie.base=128 * 1024 * 1024; *//* System hang */ - XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base); + DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base); /* Get current lookup table page size */ - temp = bReadReg(0xB00C); + temp = DRM_READ8(info->mmio_map, 0xB00C); if (temp & 0x04) { /* 8KB */ info->lutPageSize = 8 * 1024; } else { /* 4KB */ - info->lutPageSize = 4 * 1024; } - XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize); -#if 0 - /* Get current lookup table location */ - temp = bReadReg(0xB00C); - if (temp & 0x02) { /* LFB */ - info->isLUTInLFB = TRUE; - /* Current we only support lookup table in LFB */ - temp &= 0xFD; - 
bWriteReg(0xB00C, temp); - info->isLUTInLFB = FALSE; - } else { /* SFB */ - info->isLUTInLFB = FALSE; + request.size = info->pcie.size; + err = drm_sg_alloc(info->dev, & request); + if (err) { + DRM_ERROR("cannot allocate PCIE GART backing store! " + "size = %d\n", info->pcie.size); + return err; } - XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + sg = info->dev->sg; - /* Get current SDFB page size */ - temp = bReadReg(0xB00C); - if (temp & 0x08) { /* 8MB */ - info->sdfbPageSize = 8 * 1024 * 1024; - } else { /* 4MB */ - - info->sdfbPageSize = 4 * 1024 * 1024; + info->lut_handle = drm_pci_alloc(info->dev, + sizeof(u32) * sg->pages, + PAGE_SIZE, + DMA_31BIT_MASK); + if (info->lut_handle == NULL) { + DRM_ERROR("cannot allocate PCIE lut page!\n"); + return DRM_ERR(ENOMEM); } -#endif - pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE; - /* - * Allocate memory for PCIE GART table; - */ - lutEntryNum = pciePageCount; - lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE; - - /* get page_order base on page_count */ - count = lutPageCount; - for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder) ; - - if ((lutPageCount << 1) == (1 << lutPageOrder)) { - lutPageOrder -= 1; - } - - XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n", - lutEntryNum, lutPageCount, lutPageOrder); - - info->lutPageOrder = lutPageOrder; - page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder); + lut = info->lut_handle->vaddr; + for (i = 0; i < sg->pages; i++) { + info->dev->sg->busaddr[i] = pci_map_page(info->dev->pdev, + sg->pagelist[i], + 0, + PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(info->dev->sg->busaddr[i])) { + DRM_ERROR("cannot map GART backing store for DMA!\n"); + return DRM_ERR(-(info->dev->sg->busaddr[i])); + } - if (!page_addr) { - XGI_ERROR("cannot allocate PCIE lut page!\n"); - goto fail; + lut[i] = info->dev->sg->busaddr[i]; } - info->lut_base = (unsigned long *)page_addr; - - XGI_INFO("page_addr: 0x%p 
virt_to_phys(page_virtual): 0x%lx \n", - page_addr, virt_to_phys(page_addr)); - - XGI_INFO - ("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n", - info->lut_base, __pa(info->lut_base), info->lutPageOrder); - - /* - * clean all PCIE GART Entry - */ - memset(page_addr, 0, PAGE_SIZE << lutPageOrder); #if defined(__i386__) || defined(__x86_64__) asm volatile ("wbinvd":::"memory"); @@ -204,675 +109,186 @@ static int xgi_pcie_lut_init(struct xgi_info * info) #endif /* Set GART in SFB */ - bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02); + temp = DRM_READ8(info->mmio_map, 0xB00C); + DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); + /* Set GART base address to HW */ - dwWriteReg(0xB034, __pa(info->lut_base)); + dwWriteReg(info->mmio_map, 0xB034, info->lut_handle->busaddr); - return 1; - fail: return 0; } -static void xgi_pcie_lut_cleanup(struct xgi_info * info) -{ - if (info->lut_base) { - XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", - info->lut_base, info->lutPageOrder); - xgi_pcie_lut_free((unsigned long)info->lut_base, - info->lutPageOrder); - info->lut_base = NULL; - } -} - -static struct xgi_pcie_block *xgi_pcie_new_node(void) +void xgi_pcie_lut_cleanup(struct xgi_info * info) { - struct xgi_pcie_block *block = - (struct xgi_pcie_block *) kmem_cache_alloc(xgi_pcie_cache_block, - GFP_KERNEL); - if (block == NULL) { - return NULL; + if (info->dev->sg) { + drm_sg_free(info->dev, info->dev->sg->handle); } - block->offset = 0; /* block's offset in pcie memory, begin from 0 */ - block->size = 0; /* The block size. 
*/ - block->bus_addr = 0; /* CPU access address/bus address */ - block->hw_addr = 0; /* GE access address */ - block->page_count = 0; - block->page_order = 0; - block->page_block = NULL; - block->page_table = NULL; - block->owner = PCIE_INVALID; - - return block; -} - -static void xgi_pcie_block_stuff_free(struct xgi_pcie_block * block) -{ - struct page *page; - struct xgi_page_block *page_block = block->page_block; - struct xgi_page_block *free_block; - unsigned long page_count = 0; - int i; - - //XGI_INFO("block->page_block: 0x%p \n", block->page_block); - while (page_block) { - page_count = page_block->page_count; - - page = virt_to_page(page_block->virt_addr); - for (i = 0; i < page_count; i++, page++) { - XGI_DEC_PAGE_COUNT(page); - XGIUnlockPage(page); - } - free_pages(page_block->virt_addr, page_block->page_order); - - page_block->phys_addr = 0; - page_block->virt_addr = 0; - page_block->page_count = 0; - page_block->page_order = 0; - - free_block = page_block; - page_block = page_block->next; - //XGI_INFO("free free_block: 0x%p \n", free_block); - kfree(free_block); - free_block = NULL; - } - - if (block->page_table) { - //XGI_INFO("free block->page_table: 0x%p \n", block->page_table); - kfree(block->page_table); - block->page_table = NULL; + if (info->lut_handle) { + drm_pci_free(info->dev, info->lut_handle); + info->lut_handle = NULL; } } int xgi_pcie_heap_init(struct xgi_info * info) { - struct xgi_pcie_block *block; - - if (!xgi_pcie_lut_init(info)) { - XGI_ERROR("xgi_pcie_lut_init failed\n"); - return 0; - } - - xgi_pcie_heap = - (struct xgi_pcie_heap *) kmalloc(sizeof(struct xgi_pcie_heap), GFP_KERNEL); - if (!xgi_pcie_heap) { - XGI_ERROR("xgi_pcie_heap alloc failed\n"); - goto fail1; - } - INIT_LIST_HEAD(&xgi_pcie_heap->free_list); - INIT_LIST_HEAD(&xgi_pcie_heap->used_list); - INIT_LIST_HEAD(&xgi_pcie_heap->sort_list); - - xgi_pcie_heap->max_freesize = info->pcie.size; - - xgi_pcie_cache_block = - kmem_cache_create("xgi_pcie_block", sizeof(struct 
xgi_pcie_block), 0, - SLAB_HWCACHE_ALIGN, NULL, NULL); + int err; - if (NULL == xgi_pcie_cache_block) { - XGI_ERROR("Fail to creat xgi_pcie_block\n"); - goto fail2; + err = xgi_pcie_lut_init(info); + if (err) { + DRM_ERROR("xgi_pcie_lut_init failed\n"); + return err; } - block = (struct xgi_pcie_block *) xgi_pcie_new_node(); - if (!block) { - XGI_ERROR("xgi_pcie_new_node failed\n"); - goto fail3; - } - - block->offset = 0; /* block's offset in pcie memory, begin from 0 */ - block->size = info->pcie.size; - - list_add(&block->list, &xgi_pcie_heap->free_list); - XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", - block->offset, block->size); - return 1; - fail3: - if (xgi_pcie_cache_block) { - kmem_cache_destroy(xgi_pcie_cache_block); - xgi_pcie_cache_block = NULL; + err = xgi_mem_heap_init(&info->pcie_heap, 0, info->pcie.size); + if (err) { + xgi_pcie_lut_cleanup(info); } - fail2: - if (xgi_pcie_heap) { - kfree(xgi_pcie_heap); - xgi_pcie_heap = NULL; - } - fail1: - xgi_pcie_lut_cleanup(info); - return 0; + return err; } -void xgi_pcie_heap_cleanup(struct xgi_info * info) -{ - struct list_head *free_list; - struct xgi_pcie_block *block; - struct xgi_pcie_block *next; - int j; - - xgi_pcie_lut_cleanup(info); - XGI_INFO("xgi_pcie_lut_cleanup scceeded\n"); - - if (xgi_pcie_heap) { - free_list = &xgi_pcie_heap->free_list; - for (j = 0; j < 3; j++, free_list++) { - list_for_each_entry_safe(block, next, free_list, list) { - XGI_INFO - ("No. %d block offset: 0x%lx size: 0x%lx\n", - j, block->offset, block->size); - xgi_pcie_block_stuff_free(block); - block->bus_addr = 0; - block->hw_addr = 0; - - //XGI_INFO("No. 
%d free block: 0x%p \n", j, block); - kmem_cache_free(xgi_pcie_cache_block, block); - } - } - - XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap); - kfree(xgi_pcie_heap); - xgi_pcie_heap = NULL; - } - if (xgi_pcie_cache_block) { - kmem_cache_destroy(xgi_pcie_cache_block); - xgi_pcie_cache_block = NULL; - } -} - -static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info, - unsigned long originalSize, - enum PcieOwner owner) +int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + DRMFILE filp) { - struct xgi_pcie_block *block, *used_block, *free_block; - struct xgi_page_block *page_block, *prev_page_block; - struct page *page; - unsigned long page_order = 0, count = 0, index = 0; - unsigned long page_addr = 0; - u32 *lut_addr = NULL; - unsigned long lut_id = 0; - unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; - int i, j, page_count = 0; - int temp = 0; - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n"); - XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", - originalSize, size); - - if (owner == PCIE_3D) { - if (xgi_pcie_vertex_block) { - XGI_INFO - ("PCIE Vertex has been created, return directly.\n"); - return xgi_pcie_vertex_block; - } - } + struct xgi_mem_block *block; - if (owner == PCIE_3D_CMDLIST) { - if (xgi_pcie_cmdlist_block) { - XGI_INFO - ("PCIE Cmdlist has been created, return directly.\n"); - return xgi_pcie_cmdlist_block; - } + down(&info->pcie_sem); + if ((alloc->owner == PCIE_3D) && (xgi_pcie_vertex_block)) { + DRM_INFO("PCIE Vertex has been created, return directly.\n"); + block = xgi_pcie_vertex_block; } - - if (owner == PCIE_3D_SCRATCHPAD) { - if (xgi_pcie_scratchpad_block) { - XGI_INFO - ("PCIE Scratchpad has been created, return directly.\n"); - return xgi_pcie_scratchpad_block; - } - } - - if (size == 0) { - XGI_ERROR("size == 0 \n"); - return (NULL); + else if ((alloc->owner == PCIE_3D_CMDLIST) && (xgi_pcie_cmdlist_block)) { + DRM_INFO("PCIE Cmdlist has been created, 
return directly.\n"); + block = xgi_pcie_cmdlist_block; } - - XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize); - if (size > xgi_pcie_heap->max_freesize) { - XGI_ERROR - ("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n", - size, xgi_pcie_heap->max_freesize); - return (NULL); + else if ((alloc->owner == PCIE_3D_SCRATCHPAD) && (xgi_pcie_scratchpad_block)) { + DRM_INFO("PCIE Scratchpad has been created, return directly.\n"); + block = xgi_pcie_scratchpad_block; } + else { + block = xgi_mem_alloc(&info->pcie_heap, alloc->size, alloc->owner); - /* Jong 05/30/2006; find next free list which has enough space */ - list_for_each_entry(block, &xgi_pcie_heap->free_list, list) { - if (size <= block->size) { - break; + if (alloc->owner == PCIE_3D) { + xgi_pcie_vertex_block = block; + } + else if (alloc->owner == PCIE_3D_CMDLIST) { + xgi_pcie_cmdlist_block = block; + } + else if (alloc->owner == PCIE_3D_SCRATCHPAD) { + xgi_pcie_scratchpad_block = block; } } + up(&info->pcie_sem); - if (&block->list == &xgi_pcie_heap->free_list) { - XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", - size / 1024); - return (NULL); - } - - free_block = block; - XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", - size, free_block->offset, free_block->size); - - if (size == free_block->size) { - used_block = free_block; - XGI_INFO("size==free_block->size: free_block = 0x%p\n", - free_block); - list_del(&free_block->list); + if (block == NULL) { + alloc->location = XGI_MEMLOC_INVALID; + alloc->size = 0; + DRM_ERROR("PCIE RAM allocation failed\n"); + return DRM_ERR(ENOMEM); } else { - used_block = xgi_pcie_new_node(); - if (used_block == NULL) { - return NULL; - } - - if (used_block == free_block) { - XGI_ERROR("used_block == free_block = 0x%p\n", - used_block); - } - - used_block->offset = free_block->offset; - used_block->size = size; + DRM_INFO("PCIE RAM allocation succeeded: offset = 0x%lx\n", + block->offset); + alloc->location = XGI_MEMLOC_NON_LOCAL; 
+ alloc->size = block->size; + alloc->hw_addr = block->offset + info->pcie.base; + alloc->offset = block->offset; - free_block->offset += size; - free_block->size -= size; + block->filp = filp; + return 0; } +} - xgi_pcie_heap->max_freesize -= size; - used_block->bus_addr = info->pcie.base + used_block->offset; - used_block->hw_addr = info->pcie.base + used_block->offset; - used_block->page_count = page_count = size / PAGE_SIZE; +int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_mem_alloc alloc; + struct xgi_info *info = dev->dev_private; + int err; - /* get page_order base on page_count */ - for (used_block->page_order = 0; page_count; page_count >>= 1) { - ++used_block->page_order; - } + DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data, + sizeof(alloc)); - if ((used_block->page_count << 1) == (1 << used_block->page_order)) { - used_block->page_order--; - } - XGI_INFO - ("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n", - used_block->offset, used_block->size, used_block->bus_addr, - used_block->hw_addr, used_block->page_count, - used_block->page_order); - - used_block->page_block = NULL; - //used_block->page_block = (struct xgi_pages_block *)kmalloc(sizeof(struct xgi_pages_block), GFP_KERNEL); - //if (!used_block->page_block) return NULL;_t - //used_block->page_block->next = NULL; - - used_block->page_table = - (struct xgi_pte *) kmalloc(sizeof(struct xgi_pte) * used_block->page_count, - GFP_KERNEL); - if (used_block->page_table == NULL) { - goto fail; + err = xgi_pcie_alloc(info, & alloc, filp); + if (err) { + return err; } + + DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data, + alloc, sizeof(alloc)); - lut_id = (used_block->offset >> PAGE_SHIFT); - lut_addr = info->lut_base; - lut_addr += lut_id; - XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id); - - /* alloc free pages from 
system */ - page_count = used_block->page_count; - page_block = used_block->page_block; - prev_page_block = used_block->page_block; - for (i = 0; page_count > 0; i++) { - /* if size is bigger than 2M bytes, it should be split */ - if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) { - page_order = XGI_PCIE_ALLOC_MAX_ORDER; - } else { - count = page_count; - for (page_order = 0; count; count >>= 1, ++page_order) ; - - if ((page_count << 1) == (1 << page_order)) { - page_order -= 1; - } - } + return 0; +} - count = (1 << page_order); - page_addr = __get_free_pages(GFP_KERNEL, page_order); - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", - page_addr); - if (!page_addr) { - XGI_ERROR - ("No: %d :Can't get free pages: 0x%lx from system memory !\n", - i, count); - goto fail; - } +/** + * Free all blocks associated with a particular file handle. + */ +void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp) +{ + if (!info->pcie_heap.initialized) { + return; + } - /* Jong 05/30/2006; test */ - memset((unsigned char *)page_addr, 0xFF, - PAGE_SIZE << page_order); - /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */ - - if (page_block == NULL) { - page_block = - (struct xgi_page_block *) - kmalloc(sizeof(struct xgi_page_block), GFP_KERNEL); - if (!page_block) { - XGI_ERROR - ("Can't get memory for page_block! 
\n"); - goto fail; - } - } + down(&info->pcie_sem); - if (prev_page_block == NULL) { - used_block->page_block = page_block; - prev_page_block = page_block; - } else { - prev_page_block->next = page_block; - prev_page_block = page_block; - } + do { + struct xgi_mem_block *block; - page_block->next = NULL; - page_block->phys_addr = __pa(page_addr); - page_block->virt_addr = page_addr; - page_block->page_count = count; - page_block->page_order = page_order; - - XGI_INFO - ("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", - page_block->phys_addr); - XGI_INFO - ("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", - page_block->virt_addr); - - page = virt_to_page(page_addr); - - //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p" - // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n", - // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr); - - for (j = 0; j < count; j++, page++, lut_addr++) { - used_block->page_table[index + j].phys_addr = - __pa(page_address(page)); - used_block->page_table[index + j].virt_addr = - (unsigned long)page_address(page); - - XGI_INFO - ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", - used_block->page_table[index + j].phys_addr); - XGI_INFO - ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", - used_block->page_table[index + j].virt_addr); - - *lut_addr = __pa(page_address(page)); - XGI_INC_PAGE_COUNT(page); - XGILockPage(page); - - if (temp) { - XGI_INFO - ("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n", - __pa(page_address(page)), lut_addr, j, - *lut_addr); - temp--; + list_for_each_entry(block, &info->pcie_heap.used_list, list) { + if (block->filp == filp) { + break; } } - page_block = page_block->next; - page_count -= count; - index += count; - temp = 0; - } - - used_block->owner = owner; - 
list_add(&used_block->list, &xgi_pcie_heap->used_list); - -#if defined(__i386__) || defined(__x86_64__) - asm volatile ("wbinvd":::"memory"); -#else - mb(); -#endif - - /* Flush GART Table */ - bWriteReg(0xB03F, 0x40); - bWriteReg(0xB03F, 0x00); - - if (owner == PCIE_3D) { - xgi_pcie_vertex_block = used_block; - } - - if (owner == PCIE_3D_CMDLIST) { - xgi_pcie_cmdlist_block = used_block; - } - - if (owner == PCIE_3D_SCRATCHPAD) { - xgi_pcie_scratchpad_block = used_block; - } - - XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n"); - return (used_block); - - fail: - xgi_pcie_block_stuff_free(used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - return NULL; -} - -static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info, - unsigned long offset) -{ - struct xgi_pcie_block *used_block, *block; - struct xgi_pcie_block *prev, *next; - unsigned long upper, lower; - - list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { - if (block->offset == offset) { + if (&block->list == &info->pcie_heap.used_list) { break; } - } - - if (&block->list == &xgi_pcie_heap->used_list) { - XGI_ERROR("can't find block: 0x%lx to free!\n", offset); - return (NULL); - } - - used_block = block; - XGI_INFO - ("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n", - used_block, used_block->offset, used_block->size, - used_block->bus_addr, used_block->hw_addr); - - xgi_pcie_block_stuff_free(used_block); - /* update xgi_pcie_heap */ - xgi_pcie_heap->max_freesize += used_block->size; + (void) xgi_pcie_free(info, block->offset, filp); + } while(1); - prev = next = NULL; - upper = used_block->offset + used_block->size; - lower = used_block->offset; - - list_for_each_entry(block, &xgi_pcie_heap->free_list, list) { - if (block->offset == upper) { - next = block; - } else if ((block->offset + block->size) == lower) { - prev = block; - } - } - - XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); - list_del(&used_block->list); - - if (prev 
&& next) { - prev->size += (used_block->size + next->size); - list_del(&next->list); - XGI_INFO("free node 0x%p\n", next); - kmem_cache_free(xgi_pcie_cache_block, next); - kmem_cache_free(xgi_pcie_cache_block, used_block); - next = NULL; - used_block = NULL; - return (prev); - } - - if (prev) { - prev->size += used_block->size; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - used_block = NULL; - return (prev); - } - - if (next) { - next->size += used_block->size; - next->offset = used_block->offset; - XGI_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_pcie_cache_block, used_block); - used_block = NULL; - return (next); - } - - used_block->bus_addr = 0; - used_block->hw_addr = 0; - used_block->page_count = 0; - used_block->page_order = 0; - list_add(&used_block->list, &xgi_pcie_heap->free_list); - XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - return (used_block); + up(&info->pcie_sem); } -void xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - pid_t pid) -{ - struct xgi_pcie_block *block; - - xgi_down(info->pcie_sem); - block = xgi_pcie_mem_alloc(info, alloc->size, alloc->owner); - xgi_up(info->pcie_sem); - - if (block == NULL) { - alloc->location = XGI_MEMLOC_INVALID; - alloc->size = 0; - alloc->bus_addr = 0; - alloc->hw_addr = 0; - XGI_ERROR("PCIE RAM allocation failed\n"); - } else { - XGI_INFO - ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", - block->offset, block->bus_addr); - alloc->location = XGI_MEMLOC_NON_LOCAL; - alloc->size = block->size; - alloc->bus_addr = block->bus_addr; - alloc->hw_addr = block->hw_addr; - - /* - manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE. - PCIE_3D request means a opengl process created. - PCIE_3D_TEXTURE request means texture cannot alloc from fb. 
- */ - if ((alloc->owner == PCIE_3D) - || (alloc->owner == PCIE_3D_TEXTURE)) { - struct xgi_mem_pid *mempid_block = - kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL); - if (!mempid_block) - XGI_ERROR("mempid_block alloc failed\n"); - mempid_block->location = XGI_MEMLOC_NON_LOCAL; - if (alloc->owner == PCIE_3D) - mempid_block->bus_addr = 0xFFFFFFFF; /*xgi_pcie_vertex_block has the address */ - else - mempid_block->bus_addr = alloc->bus_addr; - mempid_block->pid = pid; - - XGI_INFO - ("Memory ProcessID add one pcie block pid:%ld successfully! \n", - mempid_block->pid); - list_add(&mempid_block->list, &xgi_mempid_list); - } - } -} -void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr) +int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) { - struct xgi_pcie_block *block; - unsigned long offset = bus_addr - info->pcie.base; - struct xgi_mem_pid *mempid_block; - struct xgi_mem_pid *mempid_freeblock = NULL; - char isvertex = 0; - int processcnt; - - if (xgi_pcie_vertex_block - && xgi_pcie_vertex_block->bus_addr == bus_addr) - isvertex = 1; - - if (isvertex) { - /*check is there any other process using vertex */ - processcnt = 0; - - list_for_each_entry(mempid_block, &xgi_mempid_list, list) { - if (mempid_block->location == XGI_MEMLOC_NON_LOCAL - && mempid_block->bus_addr == 0xFFFFFFFF) { - ++processcnt; - } - } - if (processcnt > 1) { - return; - } - } + const bool isvertex = (xgi_pcie_vertex_block + && (xgi_pcie_vertex_block->offset == offset)); + int err; - xgi_down(info->pcie_sem); - block = xgi_pcie_mem_free(info, offset); - xgi_up(info->pcie_sem); + down(&info->pcie_sem); + err = xgi_mem_free(&info->pcie_heap, offset, filp); + up(&info->pcie_sem); - if (block == NULL) { - XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); + if (err) { + DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); } if (isvertex) xgi_pcie_vertex_block = NULL; - /* manage mempid */ - list_for_each_entry(mempid_block, &xgi_mempid_list, 
list) { - if (mempid_block->location == XGI_MEMLOC_NON_LOCAL - && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) - || (!isvertex && mempid_block->bus_addr == bus_addr))) { - mempid_freeblock = mempid_block; - break; - } - } - if (mempid_freeblock) { - list_del(&mempid_freeblock->list); - XGI_INFO - ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", - mempid_freeblock->pid); - kfree(mempid_freeblock); - } + return err; } -/* - * given a bus address, fid the pcie mem block - * uses the bus address as the key. - */ -struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, - unsigned long address) -{ - struct xgi_pcie_block *block; - int i; - - list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { - if (block->bus_addr == address) { - return block; - } - - if (block->page_table) { - for (i = 0; i < block->page_count; i++) { - unsigned long offset = block->bus_addr; - if ((address >= offset) - && (address < (offset + PAGE_SIZE))) { - return block; - } - } - } - } +int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + u32 offset; - XGI_ERROR("could not find map for vm 0x%lx\n", address); + DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data, + sizeof(offset)); - return NULL; + return xgi_pcie_free(info, offset, filp); } + /** * xgi_find_pcie_virt * @address: GE HW address @@ -880,60 +296,43 @@ struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info, * Returns CPU virtual address. 
Assumes the CPU VAddr is continuous in not * the same block */ -void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address) +void *xgi_find_pcie_virt(struct xgi_info * info, u32 address) { - struct xgi_pcie_block *block; - const unsigned long offset_in_page = address & (PAGE_SIZE - 1); - - XGI_INFO("begin (address = 0x%lx, offset_in_page = %lu)\n", - address, offset_in_page); - - list_for_each_entry(block, &xgi_pcie_heap->used_list, list) { - XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n", - block, block->hw_addr, block->size); - - if ((address >= block->hw_addr) - && (address < (block->hw_addr + block->size))) { - const unsigned long loc_in_pagetable = - (address - block->hw_addr) >> PAGE_SHIFT; - void *const ret = - (void *)(block->page_table[loc_in_pagetable]. - virt_addr + offset_in_page); - - XGI_INFO("PAGE_SHIFT = %d\n", PAGE_SHIFT); - XGI_INFO("block->page_table[0x%lx].virt_addr = 0x%lx\n", - loc_in_pagetable, - block->page_table[loc_in_pagetable].virt_addr); - XGI_INFO("return 0x%p\n", ret); - - return ret; - } - } + const unsigned long offset = address - info->pcie.base; - XGI_ERROR("could not find map for vm 0x%lx\n", address); - return NULL; + return ((u8 *) info->dev->sg->virtual) + offset; } /* address -- GE hw address */ -void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address) +int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS) { + DRM_DEVICE; + struct xgi_info *info = dev->dev_private; + u32 address; u32 *virtaddr = 0; - XGI_INFO("input GE HW addr is 0x%x\n", address); + DRM_COPY_FROM_USER_IOCTL(address, (unsigned long __user *) data, + sizeof(address)); + + DRM_INFO("input GE HW addr is 0x%x\n", address); if (address == 0) { - return; + return DRM_ERR(EFAULT); } virtaddr = (u32 *)xgi_find_pcie_virt(info, address); - XGI_INFO("convert to CPU virt addr 0x%p\n", virtaddr); + DRM_INFO("convert to CPU virt addr 0x%p\n", virtaddr); if (virtaddr != NULL) { - XGI_INFO("original [virtaddr] = 0x%x\n", *virtaddr); + 
DRM_INFO("original [virtaddr] = 0x%x\n", *virtaddr); *virtaddr = 0x00f00fff; - XGI_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); + DRM_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); + } else { + return DRM_ERR(EFAULT); } + + return 0; } diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h deleted file mode 100644 index b66d6a28..00000000 --- a/linux-core/xgi_pcie.h +++ /dev/null @@ -1,68 +0,0 @@ - -/**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- ***************************************************************************/ - -#ifndef _XGI_PCIE_H_ -#define _XGI_PCIE_H_ - -#ifndef XGI_PCIE_ALLOC_MAX_ORDER -#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */ -#endif - -struct xgi_page_block { - struct xgi_page_block *next; - unsigned long phys_addr; - unsigned long virt_addr; - unsigned long page_count; - unsigned long page_order; -}; - -struct xgi_pcie_block { - struct list_head list; - unsigned long offset; /* block's offset in pcie memory, begin from 0 */ - unsigned long size; /* The block size. */ - unsigned long bus_addr; /* CPU access address/bus address */ - unsigned long hw_addr; /* GE access address */ - - unsigned long page_count; - unsigned long page_order; - struct xgi_page_block *page_block; - struct xgi_pte *page_table; /* list of physical pages allocated */ - - atomic_t use_count; - enum PcieOwner owner; - unsigned long processID; -}; - -struct xgi_pcie_heap { - struct list_head free_list; - struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; -}; - -#endif diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index bc3e2a1e..b211626a 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -29,269 +29,100 @@ #ifndef _XGI_REGS_H_ #define _XGI_REGS_H_ -#ifndef XGI_MMIO -#define XGI_MMIO 1 -#endif +#include "drmP.h" +#include "drm.h" -#if XGI_MMIO -#define OUTB(port, value) writeb(value, info->mmio.vbase + port) -#define INB(port) readb(info->mmio.vbase + port) -#define OUTW(port, value) writew(value, info->mmio.vbase + port) -#define INW(port) readw(info->mmio.vbase + port) -#define OUTDW(port, value) writel(value, info->mmio.vbase + port) -#define INDW(port) readl(info->mmio.vbase + port) -#else -#define OUTB(port, value) outb(value, port) -#define INB(port) inb(port) -#define OUTW(port, value) outw(value, port) -#define INW(port) inw(port) -#define OUTDW(port, value) outl(value, port) -#define INDW(port) inl(port) -#endif /* Hardware 
access functions */ -static inline void OUT3C5B(struct xgi_info * info, u8 index, u8 data) -{ - OUTB(0x3C4, index); - OUTB(0x3C5, data); -} - -static inline void OUT3X5B(struct xgi_info * info, u8 index, u8 data) -{ - OUTB(0x3D4, index); - OUTB(0x3D5, data); -} - -static inline void OUT3CFB(struct xgi_info * info, u8 index, u8 data) -{ - OUTB(0x3CE, index); - OUTB(0x3CF, data); -} - -static inline u8 IN3C5B(struct xgi_info * info, u8 index) -{ - volatile u8 data = 0; - OUTB(0x3C4, index); - data = INB(0x3C5); - return data; -} - -static inline u8 IN3X5B(struct xgi_info * info, u8 index) +static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data) { - volatile u8 data = 0; - OUTB(0x3D4, index); - data = INB(0x3D5); - return data; + DRM_WRITE8(map, 0x3C4, index); + DRM_WRITE8(map, 0x3C5, data); } -static inline u8 IN3CFB(struct xgi_info * info, u8 index) +static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data) { - volatile u8 data = 0; - OUTB(0x3CE, index); - data = INB(0x3CF); - return data; + DRM_WRITE8(map, 0x3D4, index); + DRM_WRITE8(map, 0x3D5, data); } -static inline void OUT3C5W(struct xgi_info * info, u8 index, u16 data) +static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data) { - OUTB(0x3C4, index); - OUTB(0x3C5, data); + DRM_WRITE8(map, 0x3CE, index); + DRM_WRITE8(map, 0x3CF, data); } -static inline void OUT3X5W(struct xgi_info * info, u8 index, u16 data) +static inline u8 IN3C5B(struct drm_map * map, u8 index) { - OUTB(0x3D4, index); - OUTB(0x3D5, data); + DRM_WRITE8(map, 0x3C4, index); + return DRM_READ8(map, 0x3C5); } -static inline void OUT3CFW(struct xgi_info * info, u8 index, u8 data) +static inline u8 IN3X5B(struct drm_map * map, u8 index) { - OUTB(0x3CE, index); - OUTB(0x3CF, data); + DRM_WRITE8(map, 0x3D4, index); + return DRM_READ8(map, 0x3D5); } -static inline u8 IN3C5W(struct xgi_info * info, u8 index) +static inline u8 IN3CFB(struct drm_map * map, u8 index) { - volatile u8 data = 0; - OUTB(0x3C4, index); - data = 
INB(0x3C5); - return data; + DRM_WRITE8(map, 0x3CE, index); + return DRM_READ8(map, 0x3CF); } -static inline u8 IN3X5W(struct xgi_info * info, u8 index) -{ - volatile u8 data = 0; - OUTB(0x3D4, index); - data = INB(0x3D5); - return data; -} - -static inline u8 IN3CFW(struct xgi_info * info, u8 index) -{ - volatile u8 data = 0; - OUTB(0x3CE, index); - data = INB(0x3CF); - return data; -} - -static inline u8 readAttr(struct xgi_info * info, u8 index) -{ - INB(0x3DA); /* flip-flop to index */ - OUTB(0x3C0, index); - return INB(0x3C1); -} - -static inline void writeAttr(struct xgi_info * info, u8 index, u8 value) -{ - INB(0x3DA); /* flip-flop to index */ - OUTB(0x3C0, index); - OUTB(0x3C0, value); -} /* * Graphic engine register (2d/3d) acessing interface */ -static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data) -{ - XGI_INFO("mmio vbase = 0x%p, addr = 0x%x, data = 0x%x\n", - info->mmio->vbase, addr, data); - - *(volatile u32 *)(info->mmio.vbase + addr) = (data); -} - -static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data) +static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) { - *(volatile u16 *)(info->mmio.vbase + addr) = (data); -} - -static inline void WriteRegByte(struct xgi_info * info, u32 addr, u8 data) -{ - *(volatile u8 *)(info->mmio.vbase + addr) = (data); -} - -static inline u32 ReadRegDWord(struct xgi_info * info, u32 addr) -{ - volatile u32 data; - data = *(volatile u32 *)(info->mmio.vbase + addr); - return data; -} - -static inline u16 ReadRegWord(struct xgi_info * info, u32 addr) -{ - volatile u16 data; - data = *(volatile u16 *)(info->mmio.vbase + addr); - return data; -} + DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", + map->handle, addr, data); -static inline u8 ReadRegByte(struct xgi_info * info, u32 addr) -{ - volatile u8 data; - data = *(volatile u8 *)(info->mmio.vbase + addr); - return data; + DRM_WRITE32(map, addr, data); } -#if 0 -extern void OUT3C5B(struct 
xgi_info * info, u8 index, u8 data); -extern void OUT3X5B(struct xgi_info * info, u8 index, u8 data); -extern void OUT3CFB(struct xgi_info * info, u8 index, u8 data); -extern u8 IN3C5B(struct xgi_info * info, u8 index); -extern u8 IN3X5B(struct xgi_info * info, u8 index); -extern u8 IN3CFB(struct xgi_info * info, u8 index); -extern void OUT3C5W(struct xgi_info * info, u8 index, u8 data); -extern void OUT3X5W(struct xgi_info * info, u8 index, u8 data); -extern void OUT3CFW(struct xgi_info * info, u8 index, u8 data); -extern u8 IN3C5W(struct xgi_info * info, u8 index); -extern u8 IN3X5W(struct xgi_info * info, u8 index); -extern u8 IN3CFW(struct xgi_info * info, u8 index); - -extern void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data); -extern void WriteRegWord(struct xgi_info * info, u32 addr, u16 data); -extern void WriteRegByte(struct xgi_info * info, u32 addr, u8 data); -extern u32 ReadRegDWord(struct xgi_info * info, u32 addr); -extern u16 ReadRegWord(struct xgi_info * info, u32 addr); -extern u8 ReadRegByte(struct xgi_info * info, u32 addr); - -extern void EnableProtect(); -extern void DisableProtect(); -#endif - -#define Out(port, data) OUTB(port, data) -#define bOut(port, data) OUTB(port, data) -#define wOut(port, data) OUTW(port, data) -#define dwOut(port, data) OUTDW(port, data) - -#define Out3x5(index, data) OUT3X5B(info, index, data) -#define bOut3x5(index, data) OUT3X5B(info, index, data) -#define wOut3x5(index, data) OUT3X5W(info, index, data) - -#define Out3c5(index, data) OUT3C5B(info, index, data) -#define bOut3c5(index, data) OUT3C5B(info, index, data) -#define wOut3c5(index, data) OUT3C5W(info, index, data) - -#define Out3cf(index, data) OUT3CFB(info, index, data) -#define bOut3cf(index, data) OUT3CFB(info, index, data) -#define wOut3cf(index, data) OUT3CFW(info, index, data) - -#define In(port) INB(port) -#define bIn(port) INB(port) -#define wIn(port) INW(port) -#define dwIn(port) INDW(port) - -#define In3x5(index) IN3X5B(info, index) 
-#define bIn3x5(index) IN3X5B(info, index) -#define wIn3x5(index) IN3X5W(info, index) - -#define In3c5(index) IN3C5B(info, index) -#define bIn3c5(index) IN3C5B(info, index) -#define wIn3c5(index) IN3C5W(info, index) - -#define In3cf(index) IN3CFB(info, index) -#define bIn3cf(index) IN3CFB(info, index) -#define wIn3cf(index) IN3CFW(info, index) - -#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data) -#define wWriteReg(addr, data) WriteRegWord(info, addr, data) -#define bWriteReg(addr, data) WriteRegByte(info, addr, data) -#define dwReadReg(addr) ReadRegDWord(info, addr) -#define wReadReg(addr) ReadRegWord(info, addr) -#define bReadReg(addr) ReadRegByte(info, addr) static inline void xgi_enable_mmio(struct xgi_info * info) { u8 protect = 0; + u8 temp; /* Unprotect registers */ - outb(0x11, 0x3C4); - protect = inb(0x3C5); - outb(0x92, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); - outb(0x3A, 0x3D4); - outb(inb(0x3D5) | 0x20, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20); /* Enable MMIO */ - outb(0x39, 0x3D4); - outb(inb(0x3D5) | 0x01, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01); - OUTB(0x3C4, 0x11); - OUTB(0x3C5, protect); + /* Protect registers */ + OUT3C5B(info->mmio_map, 0x11, protect); } static inline void xgi_disable_mmio(struct xgi_info * info) { u8 protect = 0; + u8 temp; - /* unprotect registers */ - OUTB(0x3C4, 0x11); - protect = INB(0x3C5); - OUTB(0x3C5, 0x92); + /* Unprotect registers */ + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); /* Disable MMIO access */ - OUTB(0x3D4, 0x39); - OUTB(0x3D5, INB(0x3D5) & 0xFE); + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = 
DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE); /* Protect registers */ - outb(0x11, 0x3C4); - outb(protect, 0x3C5); + OUT3C5B(info->mmio_map, 0x11, protect); } static inline void xgi_enable_ge(struct xgi_info * info) @@ -300,36 +131,36 @@ static inline void xgi_enable_ge(struct xgi_info * info) int wait = 0; // Enable GE - OUTW(0x3C4, 0x9211); + DRM_WRITE16(info->mmio_map, 0x3C4, 0x9211); // Save and close dynamic gating - bOld3cf2a = bIn3cf(0x2a); - bOut3cf(0x2a, bOld3cf2a & 0xfe); + bOld3cf2a = IN3CFB(info->mmio_map, 0x2a); + OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a & 0xfe); // Reset both 3D and 2D engine - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } - bOut3x5(0x36, 0x94); + OUT3X5B(info->mmio_map, 0x36, 0x94); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } // Enable 2D engine only - bOut3x5(0x36, 0x80); + OUT3X5B(info->mmio_map, 0x36, 0x80); // Enable 2D+3D engine - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); // Restore dynamic gating - bOut3cf(0x2a, bOld3cf2a); + OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a); } static inline void xgi_disable_ge(struct xgi_info * info) @@ -337,50 +168,50 @@ static inline void xgi_disable_ge(struct xgi_info * info) int wait = 0; // Reset both 3D and 2D engine - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } - bOut3x5(0x36, 0x94); + OUT3X5B(info->mmio_map, 0x36, 0x94); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } - bOut3x5(0x36, 0x84); + OUT3X5B(info->mmio_map, 0x36, 0x84); wait = 10; while (wait--) { - bIn(0x36); + DRM_READ8(info->mmio_map, 0x36); } // Disable 2D engine only - bOut3x5(0x36, 0); + 
OUT3X5B(info->mmio_map, 0x36, 0); } static inline void xgi_enable_dvi_interrupt(struct xgi_info * info) { - Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 - Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1 - Out3cf(0x39, In3cf(0x39) | 0x02); + OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 + OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x01); //Set 3cf.39 bit 0 to 1 + OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x02); } static inline void xgi_disable_dvi_interrupt(struct xgi_info * info) { - Out3cf(0x39, In3cf(0x39) & ~0x02); + OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x02); } static inline void xgi_enable_crt1_interrupt(struct xgi_info * info) { - Out3cf(0x3d, In3cf(0x3d) | 0x04); - Out3cf(0x3d, In3cf(0x3d) & ~0x04); - Out3cf(0x3d, In3cf(0x3d) | 0x08); + OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x04); + OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x04); + OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x08); } static inline void xgi_disable_crt1_interrupt(struct xgi_info * info) { - Out3cf(0x3d, In3cf(0x3d) & ~0x08); + OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x08); } #endif -- cgit v1.2.3 From 2f53ce4af2f7db911d908ff382738f30be004e8b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 11:05:13 -0700 Subject: Move MMIO drm_addmap (and code that depends on it) to xgi_bootstrap. For reasons that I don't understand, the drm_addmap call would succeed in xgi_driver_load, but writes to the map later would oops. Moving it to xgi_bootstrap fixes this problem. 
--- linux-core/xgi_drv.c | 62 ++++++++++++++++++++++++++-------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 3b9f4cb1..13e79169 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -188,7 +188,38 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) return 0; } + err = drm_addmap(dev, info->mmio.base, info->mmio.size, + _DRM_REGISTERS, _DRM_KERNEL, + &info->mmio_map); + if (err) { + DRM_ERROR("Unable to map MMIO region: %d\n", err); + return err; + } + xgi_enable_mmio(info); + //xgi_enable_ge(info); + + info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024; + + DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n", + (unsigned long) info->fb.base, info->fb.size); + + + if ((info->fb.base == 0) || (info->fb.size == 0)) { + DRM_ERROR("frame buffer appears to be wrong: 0x%lx 0x%x\n", + (unsigned long) info->fb.base, info->fb.size); + return DRM_ERR(EINVAL); + } + + + /* Init the resource manager */ + err = xgi_fb_heap_init(info); + if (err) { + DRM_ERROR("xgi_fb_heap_init() failed\n"); + return err; + } + + info->pcie.size = bs.gart_size * (1024 * 1024); @@ -280,36 +311,12 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) } - err = drm_addmap(dev, info->mmio.base, info->mmio.size, - _DRM_REGISTERS, _DRM_KERNEL | _DRM_READ_ONLY, - &info->mmio_map); - if (err) { - DRM_ERROR("Unable to map MMIO region: %d\n", err); - return err; - } - - xgi_enable_mmio(info); - //xgi_enable_ge(info); - info->fb.base = drm_get_resource_start(dev, 0); info->fb.size = drm_get_resource_len(dev, 0); DRM_INFO("fb base: 0x%lx, size: 0x%x\n", (unsigned long) info->fb.base, info->fb.size); - info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024; - - DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n", - (unsigned long) info->fb.base, info->fb.size); - - - if ((info->fb.base == 0) || (info->fb.size == 0)) { - DRM_ERROR("frame buffer appears to be wrong: 0x%lx 
0x%x\n", - (unsigned long) info->fb.base, info->fb.size); - return DRM_ERR(EINVAL); - } - - xgi_mem_block_cache = kmem_cache_create("xgi_mem_block", sizeof(struct xgi_mem_block), @@ -321,13 +328,6 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) } - /* Init the resource manager */ - err = xgi_fb_heap_init(info); - if (err) { - DRM_ERROR("xgi_fb_heap_init() failed\n"); - return err; - } - return 0; } -- cgit v1.2.3 From 15245b670e5359a7dbf9151aa9f160e929e0b46b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 11:38:56 -0700 Subject: Rework xgi_(pcie|fb)_free_all to prevent deadlock. --- linux-core/xgi_fb.c | 2 +- linux-core/xgi_pcie.c | 24 ++++++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index ce689847..a5885198 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -365,7 +365,7 @@ void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp) break; } - (void) xgi_fb_free(info, block->offset, filp); + (void) xgi_mem_free(&info->fb_heap, block->offset, filp); } while(1); up(&info->fb_sem); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 49c531fc..9dee888b 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -34,6 +34,9 @@ static struct xgi_mem_block *xgi_pcie_vertex_block = NULL; static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL; static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL; +static int xgi_pcie_free_locked(struct xgi_info * info, + unsigned long offset, DRMFILE filp); + static int xgi_pcie_lut_init(struct xgi_info * info) { u8 temp = 0; @@ -248,30 +251,39 @@ void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp) break; } - (void) xgi_pcie_free(info, block->offset, filp); + (void) xgi_pcie_free_locked(info, block->offset, filp); } while(1); up(&info->pcie_sem); } -int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +int 
xgi_pcie_free_locked(struct xgi_info * info, + unsigned long offset, DRMFILE filp) { const bool isvertex = (xgi_pcie_vertex_block && (xgi_pcie_vertex_block->offset == offset)); + int err = xgi_mem_free(&info->pcie_heap, offset, filp); + + if (!err && isvertex) + xgi_pcie_vertex_block = NULL; + + return err; +} + + +int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +{ int err; down(&info->pcie_sem); - err = xgi_mem_free(&info->pcie_heap, offset, filp); + err = xgi_pcie_free_locked(info, offset, filp); up(&info->pcie_sem); if (err) { DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); } - if (isvertex) - xgi_pcie_vertex_block = NULL; - return err; } -- cgit v1.2.3 From a33f5487296eacf503f5b27ba829f5fbdae8e63b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 19:05:52 -0700 Subject: Debug message and comment clean up in xgi_submit_cmdlist. --- linux-core/xgi_cmdlist.c | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index d2018057..2fdfcc91 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -66,58 +66,43 @@ static void xgi_submit_cmdlist(struct xgi_info * info, { const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo); - DRM_INFO("After getCurBatchBeginPort()\n"); if (s_cmdring._lastBatchStartAddr == 0) { const unsigned int portOffset = BASE_3D_ENG + beginPort; - /* Jong 06/13/2006; remove marked for system hang test */ - /* xgi_waitfor_pci_idle(info); */ - // Enable PCI Trigger Mode + /* Enable PCI Trigger Mode + */ DRM_INFO("Enable PCI Trigger Mode \n"); - - /* Jong 06/14/2006; 0x400001a */ dwWriteReg(info->mmio_map, BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | M2REG_CLEAR_COUNTERS_MASK | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); - /* Jong 06/14/2006; 0x400000a */ dwWriteReg(info->mmio_map, BASE_3D_ENG + 
M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | M2REG_PCI_TRIGGER_MODE_MASK); - // Send PCI begin command - DRM_INFO("Send PCI begin command \n"); + /* Send PCI begin command + */ DRM_INFO("portOffset=%d, beginPort=%d\n", portOffset, beginPort); - /* beginPort = 48; */ - /* 0xc100000 */ dwWriteReg(info->mmio_map, portOffset, (beginPort << 22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); - DRM_INFO("Send PCI begin command- After\n"); - - /* 0x80000024 */ dwWriteReg(info->mmio_map, portOffset + 4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - /* 0x1010000 */ dwWriteReg(info->mmio_map, portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4)); - /* Jong 06/12/2006; system hang; marked for test */ dwWriteReg(info->mmio_map, portOffset + 12, 0); - - /* Jong 06/13/2006; remove marked for system hang test */ - /* xgi_waitfor_pci_idle(info); */ } else { u32 *lastBatchVirtAddr; @@ -154,7 +139,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, } s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; - DRM_INFO("End\n"); + DRM_INFO("%s: exit\n", __func__); } -- cgit v1.2.3 From 970674f4867d65bd16cf3585d46930b72a827cce Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 19:08:47 -0700 Subject: Fix error handing related to xgi_cmdlist_initialize. xgi_cmdlist_initialize wasn't correctly checking for errors from xgi_pcie_alloc. Furthermore, xgi_bootstrap, the one caller of xgi_cmdlist_initialize, wasn't check its return value. 
--- linux-core/xgi_cmdlist.c | 10 +++++----- linux-core/xgi_drv.c | 6 +++++- 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 2fdfcc91..885b5066 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -45,11 +45,11 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) .size = size, .owner = PCIE_2D, }; + int err; - xgi_pcie_alloc(info, &mem_alloc, 0); - - if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) { - return -1; + err = xgi_pcie_alloc(info, &mem_alloc, 0); + if (err) { + return err; } s_cmdring._cmdRingSize = mem_alloc.size; @@ -58,7 +58,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) s_cmdring._lastBatchStartAddr = 0; s_cmdring._cmdRingOffset = 0; - return 1; + return 0; } static void xgi_submit_cmdlist(struct xgi_info * info, diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 13e79169..c4e7daae 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -231,7 +231,11 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) } /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - xgi_cmdlist_initialize(info, 0x100000); + err = xgi_cmdlist_initialize(info, 0x100000); + if (err) { + DRM_ERROR("xgi_cmdlist_initialize() failed\n"); + return err; + } info->bootstrap_done = 1; return 0; -- cgit v1.2.3 From 56665a42f470d5cf8cb4865558cb658dff15a9dd Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 19 Jul 2007 19:09:24 -0700 Subject: Delete unused variable in xgi_driver_load. 
--- linux-core/xgi_drv.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index c4e7daae..bcb6946d 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -286,11 +286,8 @@ void xgi_kern_isr_bh(struct drm_device *dev) int xgi_driver_load(struct drm_device *dev, unsigned long flags) { - struct xgi_info *info; - int err; - + struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); - info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); if (!info) return DRM_ERR(ENOMEM); -- cgit v1.2.3 From 6bd848307485f678915913f282e2ea59ae3ca1a8 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 20 Jul 2007 10:57:40 -0700 Subject: Change handling of begin types slightly. Moved the getCurBatchBeginPort before its only caller. Modified function to return the command ID instead of the port offset. Function also now assumes input begin type is value. Added code to ioctl handler to validate begin type. --- linux-core/xgi_cmdlist.c | 54 +++++++++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 24 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 885b5066..6cc4c142 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -34,7 +34,7 @@ struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); -static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo); +static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info, unsigned int triggerCounter); static void xgi_cmdlist_reset(void); @@ -61,14 +61,33 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return 0; } + +/** + * get_batch_command - Get the command ID for the current begin type. + * @type: Type of the current batch + * + * See section 3.2.2 "Begin" (page 15) of the 3D SPG. 
+ * + * This function assumes that @type is on the range [0,3]. + */ +unsigned int get_batch_command(enum xgi_batch_type type) +{ + static const unsigned int ports[4] = { + 0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2 + }; + + return ports[type]; +} + + static void xgi_submit_cmdlist(struct xgi_info * info, - struct xgi_cmd_info * pCmdInfo) + const struct xgi_cmd_info * pCmdInfo) { - const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo); + const unsigned int cmd = get_batch_command(pCmdInfo->_firstBeginType); if (s_cmdring._lastBatchStartAddr == 0) { - const unsigned int portOffset = BASE_3D_ENG + beginPort; + const unsigned int portOffset = BASE_3D_ENG + (cmd << 2); /* Enable PCI Trigger Mode @@ -90,10 +109,10 @@ static void xgi_submit_cmdlist(struct xgi_info * info, /* Send PCI begin command */ DRM_INFO("portOffset=%d, beginPort=%d\n", - portOffset, beginPort); + portOffset, cmd << 2); dwWriteReg(info->mmio_map, portOffset, - (beginPort << 22) + (BEGIN_VALID_MASK) + + (cmd << 24) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); dwWriteReg(info->mmio_map, portOffset + 4, @@ -128,7 +147,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, lastBatchVirtAddr[3] = 0; //barrier(); lastBatchVirtAddr[0] = - (beginPort << 22) + (BEGIN_VALID_MASK) + + (cmd << 24) + (BEGIN_VALID_MASK) + (0xffff & pCmdInfo->_curDebugID); /* Jong 06/12/2006; system hang; marked for test */ @@ -153,6 +172,10 @@ int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) (struct xgi_cmd_info __user *) data, sizeof(cmd_list)); + if (cmd_list._firstBeginType > BTYPE_CTRL) { + return DRM_ERR(EINVAL); + } + xgi_submit_cmdlist(info, &cmd_list); return 0; } @@ -238,23 +261,6 @@ static void triggerHWCommandList(struct xgi_info * info, } } -static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo) -{ - // Convert the batch type to begin port ID - switch (pCmdInfo->_firstBeginType) { - case BTYPE_2D: - return 0x30; - case BTYPE_3D: - return 0x40; - case BTYPE_FLIP: - return 0x50; - case 
BTYPE_CTRL: - return 0x20; - default: - //ASSERT(0); - return 0xff; - } -} static void addFlush2D(struct xgi_info * info) { -- cgit v1.2.3 From 659209cb2d59c7b25df58d130d0649f8f899b693 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 20 Jul 2007 11:29:16 -0700 Subject: Clean up generation of begin commands in xgi_submit_cmdlist Generate the begin command once in a temporary buffer. Then, depending on whether the command is to be written directly to the hardware or to a secondary buffer, copy to command to the correct place. --- linux-core/xgi_cmdlist.c | 49 ++++++++++++++++++------------------------------ linux-core/xgi_cmdlist.h | 1 + 2 files changed, 19 insertions(+), 31 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 6cc4c142..682c4ac1 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -84,8 +84,15 @@ static void xgi_submit_cmdlist(struct xgi_info * info, const struct xgi_cmd_info * pCmdInfo) { const unsigned int cmd = get_batch_command(pCmdInfo->_firstBeginType); + u32 begin[4]; + begin[0] = (cmd << 24) | (BEGIN_VALID_MASK) | + (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->_curDebugID); + begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->_firstSize; + begin[2] = pCmdInfo->_firstBeginAddr >> 4; + begin[3] = 0; + if (s_cmdring._lastBatchStartAddr == 0) { const unsigned int portOffset = BASE_3D_ENG + (cmd << 2); @@ -111,17 +118,10 @@ static void xgi_submit_cmdlist(struct xgi_info * info, DRM_INFO("portOffset=%d, beginPort=%d\n", portOffset, cmd << 2); - dwWriteReg(info->mmio_map, portOffset, - (cmd << 24) + (BEGIN_VALID_MASK) + - pCmdInfo->_curDebugID); - - dwWriteReg(info->mmio_map, portOffset + 4, - BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); - - dwWriteReg(info->mmio_map, portOffset + 8, - (pCmdInfo->_firstBeginAddr >> 4)); - - dwWriteReg(info->mmio_map, portOffset + 12, 0); + dwWriteReg(info->mmio_map, portOffset, begin[0]); + dwWriteReg(info->mmio_map, portOffset + 4, 
begin[1]); + dwWriteReg(info->mmio_map, portOffset + 8, begin[2]); + dwWriteReg(info->mmio_map, portOffset + 12, begin[3]); } else { u32 *lastBatchVirtAddr; @@ -135,26 +135,13 @@ static void xgi_submit_cmdlist(struct xgi_info * info, xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); - /* lastBatchVirtAddr should *never* be NULL. However, there - * are currently some bugs that cause this to happen. The - * if-statement here prevents some fatal (i.e., hard lock - * requiring the reset button) oopses. - */ - if (lastBatchVirtAddr) { - lastBatchVirtAddr[1] = - BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; - lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4; - lastBatchVirtAddr[3] = 0; - //barrier(); - lastBatchVirtAddr[0] = - (cmd << 24) + (BEGIN_VALID_MASK) + - (0xffff & pCmdInfo->_curDebugID); - - /* Jong 06/12/2006; system hang; marked for test */ - triggerHWCommandList(info, pCmdInfo->_beginCount); - } else { - DRM_ERROR("lastBatchVirtAddr is NULL\n"); - } + lastBatchVirtAddr[1] = begin[1]; + lastBatchVirtAddr[2] = begin[2]; + lastBatchVirtAddr[3] = begin[3]; + wmb(); + lastBatchVirtAddr[0] = begin[0]; + + triggerHWCommandList(info, pCmdInfo->_beginCount); } s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 4bc56ec1..08029386 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -40,6 +40,7 @@ #define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) #define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) #define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) +#define BEGIN_BEGIN_IDENTIFICATION_MASK (TWENTY_BIT_MASK<<0) #define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 typedef enum { -- cgit v1.2.3 From ed82d5398a751cf755cf4168cbb79b181facc86f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 20 Jul 2007 11:31:01 -0700 Subject: Clean up flush command generation in addFlush2D. 
--- linux-core/xgi_cmdlist.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 682c4ac1..b93541f3 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -280,11 +280,9 @@ static void addFlush2D(struct xgi_info * info) lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; lastBatchVirtAddr[3] = 0; - - //barrier(); - - // BTYPE_CTRL & NO debugID - lastBatchVirtAddr[0] = (0x20 << 22) + (BEGIN_VALID_MASK); + wmb(); + lastBatchVirtAddr[0] = (get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK); triggerHWCommandList(info, 1); -- cgit v1.2.3 From 5dc9fd96d7bf48003db832f145ad8acb4bcb73b4 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 20 Jul 2007 12:55:51 -0700 Subject: Fix linux spinlock macros after the last commit. --- linux-core/drmP.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 575e6255..af859c38 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1266,12 +1266,12 @@ static inline void drm_ctl_free(void *pt, size_t size, int area) * Initialize the lock for use. 
name is an optional string describing the * lock */ -#define DRM_SPININIT(l,name) spin_lock_init(l); +#define DRM_SPININIT(l,name) spin_lock_init(l) #define DRM_SPINUNINIT(l) -#define DRM_SPINLOCK(l) spin_lock(l); -#define DRM_SPINUNLOCK(u) spin_unlock(l); -#define DRM_SPINLOCK_IRQSAVE(l, flags) spin_lock_irqflags(l, _flags); -#define DRM_SPINUNLOCK_IRQRESTORE(u, flags) spin_unlock_irqrestore(l, _flags); +#define DRM_SPINLOCK(l) spin_lock(l) +#define DRM_SPINUNLOCK(l) spin_unlock(l) +#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags); +#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags); #define DRM_SPINLOCK_ASSERT(l) do {} while (0) #endif /* __KERNEL__ */ -- cgit v1.2.3 From e39286eb5eab8846a228863abf8f1b8b07a9e29d Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 17:00:17 -0700 Subject: Remove DRM_ERR OS macro. This was used to make all ioctl handlers return -errno on linux and errno on *BSD. Instead, just return -errno in shared code, and flip sign on return from shared code to *BSD code. 
--- linux-core/drm_drawable.c | 6 +++--- linux-core/drm_ioctl.c | 2 +- linux-core/drm_lock.c | 2 +- linux-core/drm_os_linux.h | 1 - linux-core/i810_dma.c | 2 +- linux-core/nouveau_sgdma.c | 10 +++++----- linux-core/sis_drv.c | 2 +- linux-core/sis_mm.c | 4 ++-- linux-core/via_buffer.c | 2 +- linux-core/via_dmablit.c | 32 ++++++++++++++++---------------- linux-core/via_fence.c | 2 +- linux-core/via_mm.c | 6 +++--- 12 files changed, 35 insertions(+), 36 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index d6cdba56..2787c9a3 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -130,7 +130,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) if (update.num && !rects) { DRM_ERROR("Failed to allocate cliprect memory\n"); - err = DRM_ERR(ENOMEM); + err = -ENOMEM; goto error; } @@ -140,7 +140,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) update.num * sizeof(*rects))) { DRM_ERROR("Failed to copy cliprects from userspace\n"); - err = DRM_ERR(EFAULT); + err = -EFAULT; goto error; } @@ -161,7 +161,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) break; default: DRM_ERROR("Invalid update type %d\n", update.type); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index a7bacbb8..a2c3952c 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -121,7 +121,7 @@ int drm_setunique(struct inode *inode, struct file *filp, */ ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); if (ret != 3) - return DRM_ERR(EINVAL); + return -EINVAL; domain = bus >> 8; bus &= 0xff; diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index 1ba01aab..f3685ce0 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -125,7 +125,7 @@ int drm_lock(struct inode *inode, struct file *filp, if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { if (dev->driver->dma_quiescent(dev)) { DRM_DEBUG( "%d waiting for 
DMA quiescent\n", lock.context); - return DRM_ERR(EBUSY); + return -EBUSY; } } diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 9d0d3f69..3d2ad779 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -10,7 +10,6 @@ #define DRMFILE struct file * /** Ioctl arguments */ #define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data -#define DRM_ERR(d) -(d) /** Current process ID */ #define DRM_CURRENTPID current->pid #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 1e6d8cd3..4b43647e 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -399,7 +399,7 @@ static int i810_dma_initialize(struct drm_device * dev, i810_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->ring.virtual_start = dev_priv->ring.map.handle; diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index a65317cd..0ddac952 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -33,7 +33,7 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, DRM_DEBUG("num_pages = %ld\n", num_pages); if (nvbe->pagelist) - return DRM_ERR(EINVAL); + return -EINVAL; nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT; nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t), DRM_MEM_PAGES); @@ -48,7 +48,7 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, if (pci_dma_mapping_error(nvbe->pagelist[d])) { be->func->clear(be); DRM_ERROR("pci_map_page failed\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } nvbe->pages_populated = ++d; } @@ -92,7 +92,7 @@ nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached); if (offset & NV_CTXDMA_PAGE_MASK) - return DRM_ERR(EINVAL); + return -EINVAL; nvbe->pte_start 
= (offset >> NV_CTXDMA_PAGE_SHIFT); if (dev_priv->card_type < NV_50) nvbe->pte_start += 2; /* skip ctxdma header */ @@ -102,7 +102,7 @@ nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, if (pteval & NV_CTXDMA_PAGE_MASK) { DRM_ERROR("Bad pteval 0x%llx\n", pteval); - return DRM_ERR(EINVAL); + return -EINVAL; } if (dev_priv->card_type < NV_50) { @@ -282,7 +282,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev) dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); if (!dev_priv->gart_info.sg_be) - return DRM_ERR(ENOMEM); + return -ENOMEM; be = dev_priv->gart_info.sg_be; /* Hack the aperture size down to the amount of system memory diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c index b4c3f93b..c9112c63 100644 --- a/linux-core/sis_drv.c +++ b/linux-core/sis_drv.c @@ -43,7 +43,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->chipset = chipset; diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index edbf8bf4..f9c7a7e2 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -139,7 +139,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, dev_priv->agp_initialized)) { DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; @@ -158,7 +158,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, mem.offset = 0; mem.size = 0; mem.free = 0; - retval = DRM_ERR(ENOMEM); + retval = -ENOMEM; } DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index 0461b3c7..eb5ea826 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -60,7 +60,7 @@ static int via_vram_info(struct 
drm_device *dev, struct pci_dev *pdev = dev->pdev; unsigned long flags; - int ret = DRM_ERR(EINVAL); + int ret = -EINVAL; int i; for (i=0; i<6; ++i) { flags = pci_resource_flags(pdev, i); diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 6422609c..5e73bd1a 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -236,7 +236,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) first_pfn + 1; if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); down_read(¤t->mm->mmap_sem); ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr, @@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) if (ret < 0) return ret; vsg->state = dr_via_pages_locked; - return DRM_ERR(EINVAL); + return -EINVAL; } vsg->state = dr_via_pages_locked; DRM_DEBUG("DMA pages locked\n"); @@ -271,14 +271,14 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) vsg->descriptors_per_page; if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages); vsg->state = dr_via_desc_pages_alloc; for (i=0; inum_desc_pages; ++i) { if (NULL == (vsg->desc_pages[i] = (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) - return DRM_ERR(ENOMEM); + return -ENOMEM; } DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, vsg->num_desc); @@ -606,7 +606,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if (xfer->num_lines <= 0 || xfer->line_length <= 0) { DRM_ERROR("Zero size bitblt.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* @@ -619,7 +619,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) { 
DRM_ERROR("Too large system memory stride. Stride: %d, " "Length: %d\n", xfer->mem_stride, xfer->line_length); - return DRM_ERR(EINVAL); + return -EINVAL; } if ((xfer->mem_stride == xfer->line_length) && @@ -637,7 +637,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { DRM_ERROR("Too large PCI DMA bitblt.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* @@ -648,7 +648,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if (xfer->mem_stride < xfer->line_length || abs(xfer->fb_stride) < xfer->line_length) { DRM_ERROR("Invalid frame-buffer / memory stride.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* @@ -661,13 +661,13 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) { DRM_ERROR("Invalid DRM bitblt alignment.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } #else if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) || ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { DRM_ERROR("Invalid DRM bitblt alignment.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } #endif @@ -707,7 +707,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); if (ret) { - return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret; + return (-EINTR == ret) ? -EAGAIN : ret; } spin_lock_irqsave(&blitq->blit_lock, irqsave); @@ -751,7 +751,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) if (dev_priv == NULL) { DRM_ERROR("Called without initialization.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } engine = (xfer->to_fb) ? 
0 : 1; @@ -761,7 +761,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) } if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { via_dmablit_release_slot(blitq); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { via_dmablit_release_slot(blitq); @@ -801,12 +801,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS ) DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); if (sync.engine >= VIA_NUM_BLIT_ENGINES) - return DRM_ERR(EINVAL); + return -EINVAL; err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); - if (DRM_ERR(EINTR) == err) - err = DRM_ERR(EAGAIN); + if (-EINTR) == err + err = -EAGAIN; return err; } diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index a8db3d12..a6d4ece9 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -142,7 +142,7 @@ int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t fl *native_type = DRM_FENCE_TYPE_EXE; break; default: - ret = DRM_ERR(EINVAL); + ret = -EINVAL; break; } return ret; diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index 1ac51050..7cb8651d 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -138,7 +138,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) if (mem.type > VIA_MEM_AGP) { DRM_ERROR("Unknown memory type allocation\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } mutex_lock(&dev->struct_mutex); if (0 == ((mem.type == VIA_MEM_VIDEO) ? 
dev_priv->vram_initialized : @@ -146,7 +146,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); mutex_unlock(&dev->struct_mutex); - return DRM_ERR(EINVAL); + return -EINVAL; } tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; @@ -164,7 +164,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) mem.size = 0; mem.index = 0; DRM_DEBUG("Video memory allocation failed\n"); - retval = DRM_ERR(ENOMEM); + retval = -ENOMEM; } DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem)); -- cgit v1.2.3 From c1119b1b092527fbb6950d0b5e51e076ddb00f29 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 20 Jul 2007 06:39:25 -0700 Subject: Replace filp in ioctl arguments with drm_file *file_priv. As a fallout, replace filp storage with file_priv storage for "unique identifier of a client" all over the DRM. There is a 1:1 mapping, so this should be a noop. This could be a minor performance improvement, as everything on Linux dereferenced filp to get file_priv anyway, while only the mmap ioctls went the other direction. 
--- linux-core/drmP.h | 133 +++++++++++++++++++++++--------------------- linux-core/drm_agpsupport.c | 53 ++++++++---------- linux-core/drm_auth.c | 26 ++++----- linux-core/drm_bo.c | 100 +++++++++++++++++---------------- linux-core/drm_bufs.c | 66 ++++++++++------------ linux-core/drm_context.c | 56 +++++++++---------- linux-core/drm_dma.c | 11 ++-- linux-core/drm_drv.c | 29 +++++----- linux-core/drm_fence.c | 32 +++++------ linux-core/drm_fops.c | 34 +++++------ linux-core/drm_ioc32.c | 2 +- linux-core/drm_ioctl.c | 39 ++++++------- linux-core/drm_irq.c | 19 +++---- linux-core/drm_lock.c | 26 ++++----- linux-core/drm_os_linux.h | 9 +-- linux-core/drm_scatter.c | 13 ++--- linux-core/drm_vm.c | 6 +- linux-core/i810_dma.c | 127 +++++++++++++++++++----------------------- linux-core/i810_drv.h | 7 ++- linux-core/sis_mm.c | 16 +++--- linux-core/via_dmablit.c | 2 +- linux-core/via_mm.c | 10 ++-- 22 files changed, 387 insertions(+), 429 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index af859c38..f4367955 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -84,6 +84,8 @@ #include "drm_os_linux.h" #include "drm_hashtab.h" +struct drm_file; + /* If you want the memory alloc debug functionality, change define below */ /* #define DEBUG_MEMORY */ @@ -248,15 +250,15 @@ * Test that the hardware lock is held by the caller, returning otherwise. * * \param dev DRM device. - * \param filp file pointer of the caller. + * \param file_priv DRM file private pointer of the caller. 
*/ -#define LOCK_TEST_WITH_RETURN( dev, filp ) \ +#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ do { \ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ - dev->lock.filp != filp ) { \ + dev->lock.file_priv != file_priv ) { \ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ - dev->lock.filp, filp ); \ + dev->lock.file_priv, file_priv ); \ return -EINVAL; \ } \ } while (0) @@ -277,11 +279,11 @@ do { \ * Ioctl function type. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private pointer. * \param cmd command. * \param arg argument. */ -typedef int drm_ioctl_t(struct inode *inode, struct file *filp, +typedef int drm_ioctl_t(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, @@ -323,7 +325,7 @@ struct drm_buf { __volatile__ int waiting; /**< On kernel DMA queue */ __volatile__ int pending; /**< On hardware DMA queue */ wait_queue_head_t dma_wait; /**< Processes waiting */ - struct file *filp; /**< Pointer to holding file descr */ + struct drm_file *file_priv; /**< Private of holding file descr */ int context; /**< Kernel queue for this buffer */ int while_locked; /**< Dispatch this buffer while locked */ enum { @@ -419,6 +421,7 @@ struct drm_file { struct list_head user_objects; struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; + struct file *filp; void *driver_priv; }; @@ -446,7 +449,8 @@ struct drm_queue { */ struct drm_lock_data { struct drm_hw_lock *hw_lock; /**< Hardware lock */ - struct file *filp; /**< File descr of lock holder (0=kernel) */ + /** Private of lock holder's file (NULL=kernel) */ + struct drm_file *file_priv; wait_queue_head_t lock_queue; /**< Queue of blocked processes */ unsigned long lock_time; /**< Time of last lock in jiffies */ spinlock_t spinlock; @@ -603,7 +607,7 @@ struct drm_driver { int (*load) 
(struct drm_device *, unsigned long flags); int (*firstopen) (struct drm_device *); int (*open) (struct drm_device *, struct drm_file *); - void (*preclose) (struct drm_device *, struct file * filp); + void (*preclose) (struct drm_device *, struct drm_file *file_priv); void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); @@ -637,11 +641,12 @@ struct drm_driver { void (*irq_preinstall) (struct drm_device * dev); void (*irq_postinstall) (struct drm_device * dev); void (*irq_uninstall) (struct drm_device * dev); - void (*reclaim_buffers) (struct drm_device *dev, struct file * filp); + void (*reclaim_buffers) (struct drm_device *dev, + struct drm_file *file_priv); void (*reclaim_buffers_locked) (struct drm_device *dev, - struct file * filp); + struct drm_file *file_priv); void (*reclaim_buffers_idlelocked) (struct drm_device *dev, - struct file * filp); + struct drm_file *file_priv); unsigned long (*get_map_ofs) (struct drm_map * map); unsigned long (*get_reg_ofs) (struct drm_device * dev); void (*set_version) (struct drm_device * dev, struct drm_set_version * sv); @@ -939,69 +944,70 @@ extern void drm_init_memctl(size_t low_threshold, size_t unit_size); /* Misc. 
IOCTL support (drm_ioctl.h) */ -extern int drm_irq_by_busid(struct inode *inode, struct file *filp, +extern int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getunique(struct inode *inode, struct file *filp, +extern int drm_getunique(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_setunique(struct inode *inode, struct file *filp, +extern int drm_setunique(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getmap(struct inode *inode, struct file *filp, +extern int drm_getmap(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getclient(struct inode *inode, struct file *filp, +extern int drm_getclient(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getstats(struct inode *inode, struct file *filp, +extern int drm_getstats(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_setversion(struct inode *inode, struct file *filp, +extern int drm_setversion(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_noop(struct inode *inode, struct file *filp, +extern int drm_noop(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /* Context IOCTL support (drm_context.h) */ -extern int drm_resctx(struct inode *inode, struct file *filp, +extern int drm_resctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_addctx(struct inode *inode, struct file *filp, +extern int drm_addctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_modctx(struct inode *inode, struct file *filp, +extern int drm_modctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, 
unsigned long arg); -extern int drm_getctx(struct inode *inode, struct file *filp, +extern int drm_getctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_switchctx(struct inode *inode, struct file *filp, +extern int drm_switchctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_newctx(struct inode *inode, struct file *filp, +extern int drm_newctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_rmctx(struct inode *inode, struct file *filp, +extern int drm_rmctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_ctxbitmap_init(struct drm_device *dev); extern void drm_ctxbitmap_cleanup(struct drm_device *dev); extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); -extern int drm_setsareactx(struct inode *inode, struct file *filp, +extern int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_getsareactx(struct inode *inode, struct file *filp, +extern int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /* Drawable IOCTL support (drm_drawable.h) */ -extern int drm_adddraw(struct inode *inode, struct file *filp, +extern int drm_adddraw(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_rmdraw(struct inode *inode, struct file *filp, +extern int drm_rmdraw(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_update_drawable_info(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_update_drawable_info(struct inode *inode, + struct drm_file *file_priv, + unsigned int cmd, unsigned long arg); extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, 
drm_drawable_t id); extern void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.h) */ -extern int drm_getmagic(struct inode *inode, struct file *filp, +extern int drm_getmagic(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_authmagic(struct inode *inode, struct file *filp, +extern int drm_authmagic(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /* Locking IOCTL support (drm_lock.h) */ -extern int drm_lock(struct inode *inode, struct file *filp, +extern int drm_lock(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_unlock(struct inode *inode, struct file *filp, +extern int drm_unlock(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); @@ -1013,8 +1019,7 @@ extern void drm_idlelock_release(struct drm_lock_data *lock_data); * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. 
*/ -extern int drm_i_have_hw_lock(struct file *filp); -extern int drm_kernel_take_hw_lock(struct file *filp); +extern int drm_i_have_hw_lock(struct drm_file *file_priv); /* Buffer management support (drm_bufs.h) */ extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); @@ -1023,21 +1028,21 @@ extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request extern int drm_addmap(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t ** map_ptr); -extern int drm_addmap_ioctl(struct inode *inode, struct file *filp, +extern int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); -extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp, +extern int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_addbufs(struct inode *inode, struct file *filp, +extern int drm_addbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_infobufs(struct inode *inode, struct file *filp, +extern int drm_infobufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_markbufs(struct inode *inode, struct file *filp, +extern int drm_markbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_freebufs(struct inode *inode, struct file *filp, +extern int drm_freebufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); -extern int drm_mapbufs(struct inode *inode, struct file *filp, +extern int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_order(unsigned long 
size); extern unsigned long drm_get_resource_start(struct drm_device *dev, @@ -1052,10 +1057,11 @@ extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, extern int drm_dma_setup(struct drm_device *dev); extern void drm_dma_takedown(struct drm_device *dev); extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); -extern void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp); +extern void drm_core_reclaim_buffers(struct drm_device *dev, + struct drm_file *filp); /* IRQ support (drm_irq.h) */ -extern int drm_control(struct inode *inode, struct file *filp, +extern int drm_control(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); extern int drm_irq_uninstall(struct drm_device *dev); @@ -1063,7 +1069,7 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev); extern void drm_driver_irq_postinstall(struct drm_device *dev); extern void drm_driver_irq_uninstall(struct drm_device *dev); -extern int drm_wait_vblank(struct inode *inode, struct file *filp, +extern int drm_wait_vblank(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern void drm_vbl_send_signals(struct drm_device *dev); @@ -1072,28 +1078,31 @@ extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_de /* AGP/GART support (drm_agpsupport.h) */ extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); extern int drm_agp_acquire(struct drm_device *dev); -extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_agp_acquire_ioctl(struct inode *inode, + struct drm_file *file_priv, + unsigned int cmd, unsigned long arg); extern int drm_agp_release(struct drm_device *dev); -extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp, - unsigned 
int cmd, unsigned long arg); +extern int drm_agp_release_ioctl(struct inode *inode, + struct drm_file *file_priv, + unsigned int cmd, unsigned long arg); extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_agp_enable_ioctl(struct inode *inode, + struct drm_file *file_priv, + unsigned int cmd, unsigned long arg); extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); -extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_unbind_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, +extern int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type); @@ -1128,10 +1137,10 @@ extern int drm_proc_cleanup(int 
minor, /* Scatter Gather Support (drm_scatter.h) */ extern void drm_sg_cleanup(struct drm_sg_mem * entry); -extern int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, +extern int drm_sg_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); -extern int drm_sg_free(struct inode *inode, struct file *filp, +extern int drm_sg_free(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /* ATI PCIGART support (ati_pcigart.h) */ diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 57c88638..ab7b8c90 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -40,7 +40,7 @@ * Get AGP information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a (output) drm_agp_info structure. * \return zero on success or a negative number on failure. @@ -70,10 +70,9 @@ int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info) } EXPORT_SYMBOL(drm_agp_info); -int drm_agp_info_ioctl(struct inode *inode, struct file *filp, +int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; struct drm_device *dev = priv->head->dev; struct drm_agp_info info; int err; @@ -123,7 +122,7 @@ EXPORT_SYMBOL(drm_agp_acquire); * Acquire the AGP device (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or a negative number on failure. @@ -131,12 +130,10 @@ EXPORT_SYMBOL(drm_agp_acquire); * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. 
*/ -int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, +int drm_agp_acquire_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - - return drm_agp_acquire( (struct drm_device *) priv->head->dev ); + return drm_agp_acquire( (struct drm_device *) file_priv->head->dev ); } /** @@ -162,12 +159,11 @@ int drm_agp_release(struct drm_device *dev) } EXPORT_SYMBOL(drm_agp_release); -int drm_agp_release_ioctl(struct inode *inode, struct file *filp, +int drm_agp_release_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; - + struct drm_device *dev = file_priv->head->dev; + return drm_agp_release(dev); } @@ -198,11 +194,10 @@ int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) } EXPORT_SYMBOL(drm_agp_enable); -int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, +int drm_agp_enable_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_agp_mode mode; @@ -216,7 +211,7 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, * Allocate AGP memory. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv file private pointer. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. 
@@ -259,11 +254,10 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) EXPORT_SYMBOL(drm_agp_alloc); -int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, +int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_agp_buffer request; struct drm_agp_buffer __user *argp = (void __user *)arg; int err; @@ -315,7 +309,7 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev, * Unbind AGP memory from the GATT (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_binding structure. * \return zero on success or a negative number on failure. @@ -342,11 +336,10 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) EXPORT_SYMBOL(drm_agp_unbind); -int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, +int drm_agp_unbind_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_agp_binding request; if (copy_from_user @@ -361,7 +354,7 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, * Bind AGP memory into the GATT (ioctl) * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_binding structure. * \return zero on success or a negative number on failure. 
@@ -393,11 +386,10 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) EXPORT_SYMBOL(drm_agp_bind); -int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, +int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_agp_binding request; if (copy_from_user @@ -412,7 +404,7 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, * Free AGP memory (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. @@ -443,11 +435,10 @@ EXPORT_SYMBOL(drm_agp_free); -int drm_agp_free_ioctl(struct inode *inode, struct file *filp, +int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_agp_buffer request; if (copy_from_user diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c index 4c48d872..f10a57b1 100644 --- a/linux-core/drm_auth.c +++ b/linux-core/drm_auth.c @@ -127,27 +127,26 @@ static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) * Get a unique magic number (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a resulting drm_auth structure. * \return zero on success, or a negative number on failure. * * If there is a magic number in drm_file::magic then use it, otherwise * searches an unique non-zero magic number and add it associating it with \p - * filp. + * file_priv. 
*/ -int drm_getmagic(struct inode *inode, struct file *filp, +int drm_getmagic(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { static drm_magic_t sequence = 0; static DEFINE_SPINLOCK(lock); - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_auth auth; /* Find unique magic */ - if (priv->magic) { - auth.magic = priv->magic; + if (file_priv->magic) { + auth.magic = file_priv->magic; } else { do { spin_lock(&lock); @@ -156,8 +155,8 @@ int drm_getmagic(struct inode *inode, struct file *filp, auth.magic = sequence++; spin_unlock(&lock); } while (drm_find_file(dev, auth.magic)); - priv->magic = auth.magic; - drm_add_magic(dev, priv, auth.magic); + file_priv->magic = auth.magic; + drm_add_magic(dev, file_priv, auth.magic); } DRM_DEBUG("%u\n", auth.magic); @@ -170,18 +169,17 @@ int drm_getmagic(struct inode *inode, struct file *filp, * Authenticate with a magic. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_auth structure. * \return zero if authentication successed, or a negative number otherwise. * - * Checks if \p filp is associated with the magic number passed in \arg. + * Checks if \p file_priv is associated with the magic number passed in \arg. 
*/ -int drm_authmagic(struct inode *inode, struct file *filp, +int drm_authmagic(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_auth auth; struct drm_file *file; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 374be04e..671c6232 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -505,7 +505,8 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) } } -static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_object * uo) +static void drm_bo_base_deref_locked(struct drm_file * file_priv, + struct drm_user_object * uo) { struct drm_buffer_object *bo = drm_user_object_entry(uo, struct drm_buffer_object, base); @@ -535,13 +536,13 @@ static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) * and deregister fence object usage. */ -int drm_fence_buffer_objects(struct drm_file * priv, +int drm_fence_buffer_objects(struct drm_file * file_priv, struct list_head *list, uint32_t fence_flags, struct drm_fence_object * fence, struct drm_fence_object ** used_fence) { - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_buffer_manager *bm = &dev->bm; struct drm_buffer_object *entry; @@ -921,21 +922,21 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, * Call dev->struct_mutex locked. 
*/ -struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * priv, +struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, uint32_t handle, int check_owner) { struct drm_user_object *uo; struct drm_buffer_object *bo; - uo = drm_lookup_user_object(priv, handle); + uo = drm_lookup_user_object(file_priv, handle); if (!uo || (uo->type != drm_buffer_type)) { DRM_ERROR("Could not find buffer object 0x%08x\n", handle); return NULL; } - if (check_owner && priv != uo->owner) { - if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE)) + if (check_owner && file_priv != uo->owner) { + if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE)) return NULL; } @@ -1102,17 +1103,17 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, * unregistered. */ -static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, +static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, uint32_t map_flags, unsigned hint, struct drm_bo_info_rep *rep) { struct drm_buffer_object *bo; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; int ret = 0; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); if (!bo) @@ -1169,7 +1170,7 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, } mutex_lock(&dev->struct_mutex); - ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1); + ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); mutex_unlock(&dev->struct_mutex); if (ret) { if (atomic_add_negative(-1, &bo->mapped)) @@ -1183,28 +1184,28 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, return ret; } -static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) +static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle) { - struct 
drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_buffer_object *bo; struct drm_ref_object *ro; int ret = 0; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); if (!bo) { ret = -EINVAL; goto out; } - ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1); + ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); if (!ro) { ret = -EINVAL; goto out; } - drm_remove_ref_object(priv, ro); + drm_remove_ref_object(file_priv, ro); drm_bo_usage_deref_locked(&bo); out: mutex_unlock(&dev->struct_mutex); @@ -1215,7 +1216,7 @@ static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) * Call struct-sem locked. */ -static void drm_buffer_user_object_unmap(struct drm_file * priv, +static void drm_buffer_user_object_unmap(struct drm_file *file_priv, struct drm_user_object * uo, enum drm_ref_type action) { @@ -1489,19 +1490,19 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, return 0; } -static int drm_bo_handle_validate(struct drm_file * priv, +static int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, uint32_t fence_class, uint64_t flags, uint64_t mask, uint32_t hint, struct drm_bo_info_rep *rep) { - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_buffer_object *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); if (!bo) { return -EINVAL; @@ -1532,14 +1533,14 @@ static int drm_bo_handle_validate(struct drm_file * priv, return ret; } -static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle, +static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, struct drm_bo_info_rep *rep) { - struct drm_device *dev = priv->head->dev; 
+ struct drm_device *dev = file_priv->head->dev; struct drm_buffer_object *bo; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); if (!bo) { @@ -1554,17 +1555,17 @@ static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle, return 0; } -static int drm_bo_handle_wait(struct drm_file *priv, uint32_t handle, +static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle, uint32_t hint, struct drm_bo_info_rep *rep) { - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_buffer_object *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); if (!bo) { @@ -1672,14 +1673,15 @@ int drm_buffer_object_create(struct drm_device *dev, return ret; } -static int drm_bo_add_user_object(struct drm_file * priv, struct drm_buffer_object * bo, +static int drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int shareable) { - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); - ret = drm_add_user_object(priv, &bo->base, shareable); + ret = drm_add_user_object(file_priv, &bo->base, shareable); if (ret) goto out; @@ -1693,9 +1695,9 @@ static int drm_bo_add_user_object(struct drm_file * priv, struct drm_buffer_obje return ret; } -static int drm_bo_lock_test(struct drm_device * dev, struct file *filp) +static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) { - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return 0; } @@ -1724,10 +1726,10 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) ret = 0; switch (req->op) { case drm_bo_validate: - ret = drm_bo_lock_test(dev, filp); + ret 
= drm_bo_lock_test(dev, file_priv); if (ret) break; - ret = drm_bo_handle_validate(priv, req->bo_req.handle, + ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, req->bo_req.fence_class, req->bo_req.flags, req->bo_req.mask, @@ -1779,18 +1781,18 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_lock_test(dev, filp); + ret = drm_bo_lock_test(dev, file_priv); if (ret) goto out; - ret = drm_buffer_object_create(priv->head->dev, + ret = drm_buffer_object_create(file_priv->head->dev, req->size, req->type, req->mask, req->hint, req->page_alignment, req->buffer_start, &entry); if (ret) goto out; - ret = drm_bo_add_user_object(priv, entry, + ret = drm_bo_add_user_object(file_priv, entry, req->mask & DRM_BO_FLAG_SHAREABLE); if (ret) { drm_bo_usage_deref_unlocked(&entry); @@ -1822,12 +1824,12 @@ int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, arg.handle); - if (!uo || (uo->type != drm_buffer_type) || uo->owner != priv) { + uo = drm_lookup_user_object(file_priv, arg.handle); + if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } - ret = drm_remove_user_object(priv, uo); + ret = drm_remove_user_object(file_priv, uo); mutex_unlock(&dev->struct_mutex); return ret; @@ -1847,7 +1849,7 @@ int drm_bo_map_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_buffer_object_map(priv, req->handle, req->mask, + ret = drm_buffer_object_map(file_priv, req->handle, req->mask, req->hint, rep); if (ret) return ret; @@ -1868,7 +1870,7 @@ int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_buffer_object_unmap(priv, arg.handle); + ret = drm_buffer_object_unmap(file_priv, arg.handle); return ret; } @@ -1889,12 +1891,12 
@@ int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_user_object_ref(priv, req->handle, + ret = drm_user_object_ref(file_priv, req->handle, drm_buffer_type, &uo); if (ret) return ret; - ret = drm_bo_handle_info(priv, req->handle, rep); + ret = drm_bo_handle_info(file_priv, req->handle, rep); if (ret) return ret; @@ -1915,7 +1917,7 @@ int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_user_object_unref(priv, arg.handle, drm_buffer_type); + ret = drm_user_object_unref(file_priv, arg.handle, drm_buffer_type); return ret; } @@ -1934,7 +1936,7 @@ int drm_bo_info_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_handle_info(priv, req->handle, rep); + ret = drm_bo_handle_info(file_priv, req->handle, rep); if (ret) return ret; DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); @@ -1955,7 +1957,7 @@ int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_handle_wait(priv, req->handle, + ret = drm_bo_handle_wait(file_priv, req->handle, req->hint, rep); if (ret) return ret; @@ -2407,7 +2409,7 @@ int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); ret = -EINVAL; @@ -2448,7 +2450,7 @@ int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); ret = drm_bo_lock_mm(dev, arg.mem_type); @@ -2474,7 +2476,7 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) } DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - 
LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); ret = 0; diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index c1e23b5c..a571b817 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -92,7 +92,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, * Ioctl to specify a range of memory that is available for mapping by a non-root process. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_map structure. * \return zero on success or a negative value on error. @@ -326,19 +326,15 @@ int drm_addmap(struct drm_device *dev, unsigned int offset, EXPORT_SYMBOL(drm_addmap); -int drm_addmap_ioctl(struct inode *inode, struct file *filp, +int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_map map; struct drm_map_list *maplist; struct drm_map __user *argp = (void __user *)arg; int err; - if (!(filp->f_mode & 3)) - return -EACCES; /* Require read/write */ - if (copy_from_user(&map, argp, sizeof(map))) { return -EFAULT; } @@ -366,7 +362,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, * isn't in use. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a struct drm_map structure. * \return zero on success or a negative value on error. @@ -455,11 +451,10 @@ EXPORT_SYMBOL(drm_rmmap); * gets used by drivers that the server doesn't need to care about. This seems * unlikely. 
*/ -int drm_rmmap_ioctl(struct inode *inode, struct file *filp, +int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_map request; drm_local_map_t *map = NULL; struct drm_map_list *r_list; @@ -667,7 +662,7 @@ int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); @@ -878,7 +873,7 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, @@ -1056,7 +1051,7 @@ static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); @@ -1217,7 +1212,7 @@ int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); @@ -1282,7 +1277,7 @@ EXPORT_SYMBOL(drm_addbufs_fb); * Add buffers for DMA transfers (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a struct drm_buf_desc request. 
* \return zero on success or a negative number on failure. @@ -1292,12 +1287,11 @@ EXPORT_SYMBOL(drm_addbufs_fb); * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent * PCI memory respectively. */ -int drm_addbufs(struct inode *inode, struct file *filp, +int drm_addbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { struct drm_buf_desc request; - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; int ret; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1336,7 +1330,7 @@ int drm_addbufs(struct inode *inode, struct file *filp, * large buffers can be used for image transfer). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_info structure. * \return zero on success or a negative number on failure. @@ -1345,11 +1339,10 @@ int drm_addbufs(struct inode *inode, struct file *filp, * lock, preventing of allocating more buffers after this call. Information * about each requested buffer is then copied into user space. */ -int drm_infobufs(struct inode *inode, struct file *filp, +int drm_infobufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; struct drm_buf_info request; struct drm_buf_info __user *argp = (void __user *)arg; @@ -1423,7 +1416,7 @@ int drm_infobufs(struct inode *inode, struct file *filp, * Specifies a low and high water mark for buffer allocation * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg a pointer to a drm_buf_desc structure. * \return zero on success or a negative number on failure. 
@@ -1433,11 +1426,10 @@ int drm_infobufs(struct inode *inode, struct file *filp, * * \note This ioctl is deprecated and mostly never used. */ -int drm_markbufs(struct inode *inode, struct file *filp, +int drm_markbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; struct drm_buf_desc request; int order; @@ -1475,7 +1467,7 @@ int drm_markbufs(struct inode *inode, struct file *filp, * Unreserve the buffers in list, previously reserved using drmDMA. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_free structure. * \return zero on success or a negative number on failure. @@ -1483,11 +1475,10 @@ int drm_markbufs(struct inode *inode, struct file *filp, * Calls free_buffer() for each used buffer. * This function is primarily used for debugging. */ -int drm_freebufs(struct inode *inode, struct file *filp, +int drm_freebufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; struct drm_buf_free request; int i; @@ -1514,7 +1505,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, return -EINVAL; } buf = dma->buflist[idx]; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("Process %d freeing buffer not owned\n", current->pid); return -EINVAL; @@ -1529,7 +1520,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, * Maps all of the DMA buffers into client-virtual space (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. 
* \param arg pointer to a drm_buf_map structure. * \return zero on success or a negative number on failure. @@ -1539,11 +1530,10 @@ int drm_freebufs(struct inode *inode, struct file *filp, * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls * drm_mmap_dma(). */ -int drm_mapbufs(struct inode *inode, struct file *filp, +int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; struct drm_buf_map __user *argp = (void __user *)arg; int retcode = 0; @@ -1584,14 +1574,14 @@ int drm_mapbufs(struct inode *inode, struct file *filp, goto done; } down_write(&current->mm->mmap_sem); - virtual = do_mmap(filp, 0, map->size, + virtual = do_mmap(file_priv->filp, 0, map->size, PROT_READ | PROT_WRITE, MAP_SHARED, token); up_write(&current->mm->mmap_sem); } else { down_write(&current->mm->mmap_sem); - virtual = do_mmap(filp, 0, dma->byte_count, + virtual = do_mmap(file_priv->filp, 0, dma->byte_count, PROT_READ | PROT_WRITE, MAP_SHARED, 0); up_write(&current->mm->mmap_sem); diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index a0b1a7ec..76e13f65 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -132,7 +132,7 @@ void drm_ctxbitmap_cleanup(struct drm_device *dev) * Get per-context SAREA. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_priv_map structure. * \return zero on success or a negative number on failure. @@ -140,11 +140,10 @@ void drm_ctxbitmap_cleanup(struct drm_device *dev) * Gets the map from drm_device::ctx_idr with the handle specified and * returns its handle. 
*/ -int drm_getsareactx(struct inode *inode, struct file *filp, +int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx_priv_map __user *argp = (void __user *)arg; struct drm_ctx_priv_map request; struct drm_map *map; @@ -183,7 +182,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp, * Set per-context SAREA. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_priv_map structure. * \return zero on success or a negative number on failure. @@ -191,11 +190,10 @@ int drm_getsareactx(struct inode *inode, struct file *filp, * Searches the mapping specified in \p arg and update the entry in * drm_device::ctx_idr with it. */ -int drm_setsareactx(struct inode *inode, struct file *filp, +int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx_priv_map request; struct drm_map *map = NULL; struct drm_map_list *r_list = NULL; @@ -293,12 +291,12 @@ static int drm_context_switch_complete(struct drm_device *dev, int new) * Reserve contexts. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_res structure. * \return zero on success or a negative number on failure. 
*/ -int drm_resctx(struct inode *inode, struct file *filp, +int drm_resctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { struct drm_ctx_res res; @@ -328,18 +326,17 @@ int drm_resctx(struct inode *inode, struct file *filp, * Add context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Get a new handle for the context and copy to userspace. */ -int drm_addctx(struct inode *inode, struct file *filp, +int drm_addctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx_list *ctx_entry; struct drm_ctx __user *argp = (void __user *)arg; struct drm_ctx ctx; @@ -375,7 +372,7 @@ int drm_addctx(struct inode *inode, struct file *filp, INIT_LIST_HEAD(&ctx_entry->head); ctx_entry->handle = ctx.handle; - ctx_entry->tag = priv; + ctx_entry->tag = file_priv; mutex_lock(&dev->ctxlist_mutex); list_add(&ctx_entry->head, &dev->ctxlist); @@ -387,7 +384,7 @@ int drm_addctx(struct inode *inode, struct file *filp, return 0; } -int drm_modctx(struct inode *inode, struct file *filp, +int drm_modctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { /* This does nothing */ @@ -398,12 +395,12 @@ int drm_modctx(struct inode *inode, struct file *filp, * Get context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. 
*/ -int drm_getctx(struct inode *inode, struct file *filp, +int drm_getctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { struct drm_ctx __user *argp = (void __user *)arg; @@ -424,18 +421,17 @@ int drm_getctx(struct inode *inode, struct file *filp, * Switch context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Calls context_switch(). */ -int drm_switchctx(struct inode *inode, struct file *filp, +int drm_switchctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -449,18 +445,17 @@ int drm_switchctx(struct inode *inode, struct file *filp, * New context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Calls context_switch_complete(). */ -int drm_newctx(struct inode *inode, struct file *filp, +int drm_newctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -476,18 +471,17 @@ int drm_newctx(struct inode *inode, struct file *filp, * Remove context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. 
* \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * If not the special kernel context, calls ctxbitmap_free() to free the specified context. */ -int drm_rmctx(struct inode *inode, struct file *filp, +int drm_rmctx(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_ctx ctx; if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) @@ -495,7 +489,7 @@ int drm_rmctx(struct inode *inode, struct file *filp, DRM_DEBUG("%d\n", ctx.handle); if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { - priv->remove_auth_on_close = 1; + file_priv->remove_auth_on_close = 1; } if (ctx.handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index d2a88d52..7cc44193 100644 --- a/linux-core/drm_dma.c +++ b/linux-core/drm_dma.c @@ -136,7 +136,7 @@ void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf) buf->waiting = 0; buf->pending = 0; - buf->filp = NULL; + buf->file_priv = NULL; buf->used = 0; if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) @@ -148,11 +148,12 @@ void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf) /** * Reclaim the buffers. * - * \param filp file pointer. + * \param file_priv DRM file private. * - * Frees each buffer associated with \p filp not already on the hardware. + * Frees each buffer associated with \p file_priv not already on the hardware. 
*/ -void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp) +void drm_core_reclaim_buffers(struct drm_device *dev, + struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int i; @@ -160,7 +161,7 @@ void drm_core_reclaim_buffers(struct drm_device *dev, struct file *filp) if (!dma) return; for (i = 0; i < dma->buf_count; i++) { - if (dma->buflist[i]->filp == filp) { + if (dma->buflist[i]->file_priv == file_priv) { switch (dma->buflist[i]->list) { case DRM_LIST_NONE: drm_free_buffer(dev, dma->buflist[i]); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 84efbfe7..92b07729 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -51,7 +51,7 @@ static void drm_cleanup(struct drm_device * dev); int drm_fb_loaded = 0; -static int drm_version(struct inode *inode, struct file *filp, +static int drm_version(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg); /** Ioctl table */ @@ -276,7 +276,7 @@ int drm_lastclose(struct drm_device * dev) if (dev->lock.hw_lock) { dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ - dev->lock.filp = NULL; + dev->lock.file_priv = NULL; wake_up_interruptible(&dev->lock.lock_queue); } dev->dev_mapping = NULL; @@ -538,18 +538,17 @@ module_exit(drm_core_exit); * Get version information * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_version structure. * \return zero on success or negative number on failure. * * Fills in the version information in \p arg. 
*/ -static int drm_version(struct inode *inode, struct file *filp, +static int drm_version(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_version __user *argp = (void __user *)arg; struct drm_version version; int len; @@ -573,7 +572,7 @@ static int drm_version(struct inode *inode, struct file *filp, * Called whenever a process performs an ioctl on /dev/drm. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or negative number on failure. @@ -584,8 +583,8 @@ static int drm_version(struct inode *inode, struct file *filp, int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->head->dev; struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); @@ -593,11 +592,11 @@ int drm_ioctl(struct inode *inode, struct file *filp, atomic_inc(&dev->ioctl_count); atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); - ++priv->ioctl_count; + ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", - current->pid, cmd, nr, (long)old_encode_dev(priv->head->device), - priv->authenticated); + current->pid, cmd, nr, (long)old_encode_dev(file_priv->head->device), + file_priv->authenticated); if ((nr >= DRM_CORE_IOCTL_COUNT) && ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) @@ -619,11 +618,11 @@ int drm_ioctl(struct inode *inode, struct file *filp, DRM_DEBUG("no function\n"); retcode = -EINVAL; } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || - ((ioctl->flags & DRM_AUTH) && 
!priv->authenticated) || - ((ioctl->flags & DRM_MASTER) && !priv->master)) { + ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || + ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { retcode = -EACCES; } else { - retcode = func(inode, filp, cmd, arg); + retcode = func(inode, file_priv, cmd, arg); } err_i1: atomic_dec(&dev->ioctl_count); diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 9b2fa405..3a3035e1 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -582,12 +582,12 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); if (arg.flags & DRM_FENCE_FLAG_EMIT) - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); ret = drm_fence_object_create(dev, arg.class, arg.type, arg.flags, &fence); if (ret) return ret; - ret = drm_fence_add_user_object(priv, fence, + ret = drm_fence_add_user_object(file_priv, fence, arg.flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) { @@ -630,12 +630,12 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, arg.handle); - if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { + uo = drm_lookup_user_object(file_priv, arg.handle); + if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } - ret = drm_remove_user_object(priv, uo); + ret = drm_remove_user_object(file_priv, uo); mutex_unlock(&dev->struct_mutex); return ret; } @@ -658,10 +658,10 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) } DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); + ret = drm_user_object_ref(file_priv, arg.handle, drm_fence_type, &uo); if (ret) return ret; - fence = drm_lookup_fence_object(priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg.handle); 
read_lock_irqsave(&fm->lock, flags); arg.class = fence->class; @@ -689,7 +689,7 @@ int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) } DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - return drm_user_object_unref(priv, arg.handle, drm_fence_type); + return drm_user_object_unref(file_priv, arg.handle, drm_fence_type); } int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) @@ -709,7 +709,7 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - fence = drm_lookup_fence_object(priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg.handle); if (!fence) return -EINVAL; @@ -741,7 +741,7 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - fence = drm_lookup_fence_object(priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg.handle); if (!fence) return -EINVAL; ret = drm_fence_object_flush(fence, arg.type); @@ -775,7 +775,7 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - fence = drm_lookup_fence_object(priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg.handle); if (!fence) return -EINVAL; ret = drm_fence_object_wait(fence, @@ -811,8 +811,8 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, filp); - fence = drm_lookup_fence_object(priv, arg.handle); + LOCK_TEST_WITH_RETURN(dev, file_priv); + fence = drm_lookup_fence_object(file_priv, arg.handle); if (!fence) return -EINVAL; ret = drm_fence_object_emit(fence, arg.flags, arg.class, @@ -850,12 +850,12 @@ int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) DRM_ERROR("Buffer object manager is not initialized\n"); return -EINVAL; } - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_buffer_objects(priv, NULL, arg.flags, + LOCK_TEST_WITH_RETURN(dev, file_priv); + ret = drm_fence_buffer_objects(file_priv, NULL, arg.flags, 
NULL, &fence); if (ret) return ret; - ret = drm_fence_add_user_object(priv, fence, + ret = drm_fence_add_user_object(file_priv, fence, arg.flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index d542d4e3..0162f113 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -252,6 +252,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, memset(priv, 0, sizeof(*priv)); filp->private_data = priv; + priv->filp = filp; priv->uid = current->euid; priv->pid = current->pid; priv->minor = minor; @@ -376,7 +377,7 @@ static void drm_object_release(struct file *filp) { * Release file. * * \param inode device inode - * \param filp file pointer. + * \param file_priv DRM file private. * \return zero on success or a negative number on failure. * * If the hardware lock is held then free it, and take it again for the kernel @@ -386,29 +387,28 @@ static void drm_object_release(struct file *filp) { */ int drm_release(struct inode *inode, struct file *filp) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev; + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->head->dev; int retcode = 0; lock_kernel(); - dev = priv->head->dev; DRM_DEBUG("open_count = %d\n", dev->open_count); if (dev->driver->preclose) - dev->driver->preclose(dev, filp); + dev->driver->preclose(dev, file_priv); /* ======================================================== * Begin inline drm_release */ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", - current->pid, (long)old_encode_dev(priv->head->device), + current->pid, (long)old_encode_dev(dev), dev->open_count); if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { - if (drm_i_have_hw_lock(filp)) { - dev->driver->reclaim_buffers_locked(dev, filp); + if (drm_i_have_hw_lock(file_priv)) { + dev->driver->reclaim_buffers_locked(dev, file_priv); } else { unsigned long _end=jiffies + 3*DRM_HZ; int locked = 0; @@ -434,7 
+434,7 @@ int drm_release(struct inode *inode, struct file *filp) "\tI will go on reclaiming the buffers anyway.\n"); } - dev->driver->reclaim_buffers_locked(dev, filp); + dev->driver->reclaim_buffers_locked(dev, file_priv); drm_idlelock_release(&dev->lock); } } @@ -442,12 +442,12 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { drm_idlelock_take(&dev->lock); - dev->driver->reclaim_buffers_idlelocked(dev, filp); + dev->driver->reclaim_buffers_idlelocked(dev, file_priv); drm_idlelock_release(&dev->lock); } - if (drm_i_have_hw_lock(filp)) { + if (drm_i_have_hw_lock(file_priv)) { DRM_DEBUG("File %p released, freeing lock for context %d\n", filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); @@ -458,7 +458,7 @@ int drm_release(struct inode *inode, struct file *filp) if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !dev->driver->reclaim_buffers_locked) { - dev->driver->reclaim_buffers(dev, filp); + dev->driver->reclaim_buffers(dev, file_priv); } drm_fasync(-1, filp, 0); @@ -469,7 +469,7 @@ int drm_release(struct inode *inode, struct file *filp) struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->tag == priv && + if (pos->tag == file_priv && pos->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) dev->driver->context_dtor(dev, @@ -487,18 +487,18 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->struct_mutex); drm_object_release(filp); - if (priv->remove_auth_on_close == 1) { + if (file_priv->remove_auth_on_close == 1) { struct drm_file *temp; list_for_each_entry(temp, &dev->filelist, lhead) temp->authenticated = 0; } - list_del(&priv->lhead); + list_del(&file_priv->lhead); mutex_unlock(&dev->struct_mutex); if (dev->driver->postclose) - dev->driver->postclose(dev, priv); - drm_free(priv, sizeof(*priv), DRM_MEM_FILES); + dev->driver->postclose(dev, file_priv); + drm_free(file_priv, sizeof(*file_priv), 
DRM_MEM_FILES); /* ======================================================== * End inline drm_release diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c index b1162785..558376de 100644 --- a/linux-core/drm_ioc32.c +++ b/linux-core/drm_ioc32.c @@ -1040,7 +1040,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = { * Called whenever a 32-bit process running under a 64-bit kernel * performs an ioctl on /dev/drm. * - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or negative number on failure. diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index a2c3952c..6f0ef149 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -42,18 +42,17 @@ * Get the bus id. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_unique structure. * \return zero on success or a negative number on failure. * * Copies the bus id from drm_device::unique into user space. */ -int drm_getunique(struct inode *inode, struct file *filp, +int drm_getunique(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_unique __user *argp = (void __user *)arg; struct drm_unique u; @@ -73,7 +72,7 @@ int drm_getunique(struct inode *inode, struct file *filp, * Set the bus id. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_unique structure. * \return zero on success or a negative number on failure. @@ -83,11 +82,10 @@ int drm_getunique(struct inode *inode, struct file *filp, * in interface version 1.1 and will return EBUSY when setversion has requested * version 1.1 or greater. 
*/ -int drm_setunique(struct inode *inode, struct file *filp, +int drm_setunique(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_unique u; int domain, bus, slot, func, ret; @@ -167,7 +165,7 @@ static int drm_set_busid(struct drm_device * dev) * Get a mapping information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_map structure. * @@ -176,11 +174,10 @@ static int drm_set_busid(struct drm_device * dev) * Searches for the mapping with the specified offset and copies its information * into userspace */ -int drm_getmap(struct inode *inode, struct file *filp, +int drm_getmap(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_map __user *argp = (void __user *)arg; struct drm_map map; struct drm_map_list *r_list = NULL; @@ -228,7 +225,7 @@ int drm_getmap(struct inode *inode, struct file *filp, * Get client information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_client structure. 
* @@ -237,11 +234,10 @@ int drm_getmap(struct inode *inode, struct file *filp, * Searches for the client with the specified index and copies its information * into userspace */ -int drm_getclient(struct inode *inode, struct file *filp, +int drm_getclient(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_client __user *argp = (struct drm_client __user *)arg; struct drm_client client; struct drm_file *pt; @@ -280,17 +276,16 @@ int drm_getclient(struct inode *inode, struct file *filp, * Get statistics information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_stats structure. * * \return zero on success or a negative number on failure. */ -int drm_getstats(struct inode *inode, struct file *filp, +int drm_getstats(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_stats stats; int i; @@ -320,7 +315,7 @@ int drm_getstats(struct inode *inode, struct file *filp, * Setversion ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. @@ -372,7 +367,7 @@ int drm_setversion(DRM_IOCTL_ARGS) } /** No-op ioctl. 
*/ -int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd, +int drm_noop(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { DRM_DEBUG("\n"); diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 140ceca6..36df557b 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -41,7 +41,7 @@ * Get interrupt from bus id. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_irq_busid structure. * \return zero on success or a negative number on failure. @@ -50,11 +50,10 @@ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal * to that of the device that this DRM instance attached to. */ -int drm_irq_by_busid(struct inode *inode, struct file *filp, +int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_irq_busid __user *argp = (void __user *)arg; struct drm_irq_busid p; @@ -185,18 +184,17 @@ EXPORT_SYMBOL(drm_irq_uninstall); * IRQ control ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_control structure. * \return zero on success or a negative number on failure. * * Calls irq_install() or irq_uninstall() according to \p arg. 
*/ -int drm_control(struct inode *inode, struct file *filp, +int drm_control(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_control ctl; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ @@ -225,7 +223,7 @@ int drm_control(struct inode *inode, struct file *filp, * Wait for VBLANK. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param data user argument, pointing to a drm_wait_vblank structure. * \return zero on success or a negative number on failure. @@ -242,8 +240,7 @@ int drm_control(struct inode *inode, struct file *filp, */ int drm_wait_vblank(DRM_IOCTL_ARGS) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; union drm_wait_vblank __user *argp = (void __user *)data; union drm_wait_vblank vblwait; struct timeval now; diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index f3685ce0..54e34e14 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -41,23 +41,22 @@ static int drm_notifier(void *priv); * Lock ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Add the current task to the lock wait queue, and attempt to take to lock. 
*/ -int drm_lock(struct inode *inode, struct file *filp, +int drm_lock(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; DECLARE_WAITQUEUE(entry, current); struct drm_lock lock; int ret = 0; - ++priv->lock_count; + ++file_priv->lock_count; if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) return -EFAULT; @@ -88,7 +87,7 @@ int drm_lock(struct inode *inode, struct file *filp, break; } if (drm_lock_take(&dev->lock, lock.context)) { - dev->lock.filp = filp; + dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ @@ -142,18 +141,17 @@ int drm_lock(struct inode *inode, struct file *filp, * Unlock ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Transfer and free the lock. 
*/ -int drm_unlock(struct inode *inode, struct file *filp, +int drm_unlock(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_lock lock; unsigned long irqflags; @@ -258,7 +256,7 @@ static int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; - lock_data->filp = NULL; + lock_data->file_priv = NULL; do { old = *lock; new = context | _DRM_LOCK_HELD; @@ -391,13 +389,13 @@ void drm_idlelock_release(struct drm_lock_data *lock_data) EXPORT_SYMBOL(drm_idlelock_release); -int drm_i_have_hw_lock(struct file *filp) +int drm_i_have_hw_lock(struct drm_file *file_priv) { DRM_DEVICE; - return (priv->lock_count && dev->lock.hw_lock && + return (file_priv->lock_count && dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && - dev->lock.filp == filp); + dev->lock.file_priv == file_priv); } EXPORT_SYMBOL(drm_i_have_hw_lock); diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 3d2ad779..3f143833 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -6,10 +6,8 @@ #include /* For task queue support */ #include -/** File pointer type */ -#define DRMFILE struct file * /** Ioctl arguments */ -#define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data +#define DRM_IOCTL_ARGS struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long data /** Current process ID */ #define DRM_CURRENTPID current->pid #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) @@ -51,8 +49,7 @@ /** Read/write memory barrier */ #define DRM_MEMORYBARRIER() mb() /** DRM device local declaration */ -#define DRM_DEVICE struct drm_file *priv = filp->private_data; \ - struct drm_device *dev = priv->head->dev +#define DRM_DEVICE struct drm_device *dev 
= file_priv->head->dev /** IRQ handler arguments and return type and values */ #define DRM_IRQ_ARGS int irq, void *arg @@ -116,8 +113,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) #define DRM_GET_USER_UNCHECKED(val, uaddr) \ __get_user(val, uaddr) -#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data - #define DRM_HZ HZ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index 7c13610d..58696347 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -187,10 +187,10 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) } EXPORT_SYMBOL(drm_sg_alloc); -int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, +int drm_sg_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; + struct drm_device *dev = file_priv->head->dev; struct drm_scatter_gather __user *argp = (void __user *)arg; struct drm_scatter_gather request; int ret; @@ -198,11 +198,11 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, if (copy_from_user(&request, argp, sizeof(request))) return -EFAULT; - ret = drm_sg_alloc(priv->head->dev, &request); + ret = drm_sg_alloc(dev, &request); if ( ret ) return ret; if (copy_to_user(argp, &request, sizeof(request))) { - drm_sg_cleanup(priv->head->dev->sg); + drm_sg_cleanup(dev->sg); return -EFAULT; } @@ -211,11 +211,10 @@ int drm_sg_alloc_ioctl(struct inode *inode, struct file *filp, } -int drm_sg_free(struct inode *inode, struct file *filp, +int drm_sg_free(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_scatter_gather request; struct drm_sg_mem *entry; diff --git a/linux-core/drm_vm.c 
b/linux-core/drm_vm.c index 265a59d8..c4e790ef 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -477,7 +477,7 @@ static void drm_vm_close(struct vm_area_struct *vma) /** * mmap DMA memory. * - * \param filp file pointer. + * \param file_priv DRM file private. * \param vma virtual memory area. * \return zero on success or a negative number on failure. * @@ -543,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); /** * mmap DMA memory. * - * \param filp file pointer. + * \param file_priv DRM file private. * \param vma virtual memory area. * \return zero on success or a negative number on failure. * @@ -865,7 +865,7 @@ static struct vm_operations_struct drm_bo_vm_ops = { * mmap buffer object memory. * * \param vma virtual memory area. - * \param filp file pointer. + * \param file_priv DRM file private. * \param map The buffer object drm map. * \return zero on success or a negative number on failure. */ diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 4b43647e..1e74d792 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -139,10 +139,9 @@ static const struct file_operations i810_buffer_fops = { .fasync = drm_fasync, }; -static int i810_map_buffer(struct drm_buf * buf, struct file *filp) +static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_private_t *dev_priv = dev->dev_private; const struct file_operations *old_fops; @@ -152,14 +151,14 @@ static int i810_map_buffer(struct drm_buf * buf, struct file *filp) return -EINVAL; down_write(¤t->mm->mmap_sem); - old_fops = filp->f_op; - filp->f_op = &i810_buffer_fops; + old_fops = file_priv->filp->f_op; + file_priv->filp->f_op = &i810_buffer_fops; dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + buf_priv->virtual = (void 
*)do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE, MAP_SHARED, buf->bus_address); dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; + file_priv->filp->f_op = old_fops; if (IS_ERR(buf_priv->virtual)) { /* Real error */ DRM_ERROR("mmap error\n"); @@ -192,7 +191,7 @@ static int i810_unmap_buffer(struct drm_buf * buf) } static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, - struct file *filp) + struct drm_file *file_priv) { struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; @@ -205,13 +204,13 @@ static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, return retcode; } - retcode = i810_map_buffer(buf, filp); + retcode = i810_map_buffer(buf, file_priv); if (retcode) { i810_freelist_put(dev, buf); DRM_ERROR("mapbuf failed, retcode %d\n", retcode); return retcode; } - buf->filp = filp; + buf->file_priv = file_priv; buf_priv = buf->dev_private; d->granted = 1; d->request_idx = buf->idx; @@ -492,11 +491,10 @@ static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) return 0; } -static int i810_dma_init(struct inode *inode, struct file *filp, +static int i810_dma_init(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv; drm_i810_init_t init; int retcode = 0; @@ -987,7 +985,8 @@ static int i810_flush_queue(struct drm_device * dev) } /* Must be called with the lock held */ -static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) +static void i810_reclaim_buffers(struct drm_device *dev, + struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int i; @@ -1005,7 +1004,7 @@ static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; - if (buf->filp == filp && 
buf_priv) { + if (buf->file_priv == file_priv && buf_priv) { int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE); @@ -1017,23 +1016,21 @@ static void i810_reclaim_buffers(struct drm_device *dev, struct file *filp) } } -static int i810_flush_ioctl(struct inode *inode, struct file *filp, +static int i810_flush_ioctl(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); i810_flush_queue(dev); return 0; } -static int i810_dma_vertex(struct inode *inode, struct file *filp, +static int i810_dma_vertex(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; @@ -1045,7 +1042,7 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp, (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex))) return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", vertex.idx, vertex.used, vertex.discard); @@ -1065,18 +1062,17 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp, return 0; } -static int i810_clear_bufs(struct inode *inode, struct file *filp, +static int i810_clear_bufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_clear_t clear; if (copy_from_user (&clear, (drm_i810_clear_t __user *) 
arg, sizeof(clear))) return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); /* GH: Someone's doing nasty things... */ if (!dev->dev_private) { @@ -1088,25 +1084,24 @@ static int i810_clear_bufs(struct inode *inode, struct file *filp, return 0; } -static int i810_swap_bufs(struct inode *inode, struct file *filp, +static int i810_swap_bufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; DRM_DEBUG("i810_swap_bufs\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); i810_dma_dispatch_swap(dev); return 0; } -static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, +static int i810_getage(struct inode *inode, struct drm_file *file_priv, + unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1116,11 +1111,10 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } -static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static int i810_getbuf(struct inode *inode, struct drm_file *file_priv, + unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; int retcode = 0; drm_i810_dma_t d; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; @@ -1131,11 +1125,11 @@ static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, 
sizeof(d))) return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); d.granted = 0; - retcode = i810_dma_get_buffer(dev, &d, filp); + retcode = i810_dma_get_buffer(dev, &d, file_priv); DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", current->pid, retcode, d.granted); @@ -1147,15 +1141,15 @@ static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, return retcode; } -static int i810_copybuf(struct inode *inode, - struct file *filp, unsigned int cmd, unsigned long arg) +static int i810_copybuf(struct inode *inode, struct drm_file *file_priv, + unsigned int cmd, unsigned long arg) { /* Never copy - 2.4.x doesn't need it */ return 0; } -static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static int i810_docopy(struct inode *inode, struct drm_file *file_priv, + unsigned int cmd, unsigned long arg) { /* Never copy - 2.4.x doesn't need it */ return 0; @@ -1221,11 +1215,10 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, ADVANCE_LP_RING(); } -static int i810_dma_mc(struct inode *inode, struct file *filp, +static int i810_dma_mc(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; @@ -1236,7 +1229,7 @@ static int i810_dma_mc(struct inode *inode, struct file *filp, if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc))) return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (mc.idx >= dma->buf_count || mc.idx < 0) return -EINVAL; @@ -1252,21 +1245,19 @@ static int i810_dma_mc(struct inode *inode, struct file *filp, return 0; } -static int 
i810_rstatus(struct inode *inode, struct file *filp, +static int i810_rstatus(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; return (int)(((u32 *) (dev_priv->hw_status_page))[4]); } -static int i810_ov0_info(struct inode *inode, struct file *filp, +static int i810_ov0_info(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; drm_i810_overlay_t data; @@ -1278,25 +1269,23 @@ static int i810_ov0_info(struct inode *inode, struct file *filp, return 0; } -static int i810_fstatus(struct inode *inode, struct file *filp, +static int i810_fstatus(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return I810_READ(0x30008); } -static int i810_ov0_flip(struct inode *inode, struct file *filp, +static int i810_ov0_flip(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); //Tell the overlay to update I810_WRITE(0x30000, dev_priv->overlay_physical | 
0x80000000); @@ -1327,16 +1316,15 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev) return 0; } -static int i810_flip_bufs(struct inode *inode, struct file *filp, +static int i810_flip_bufs(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long arg) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv->page_flipping) i810_do_init_pageflip(dev); @@ -1362,7 +1350,7 @@ void i810_driver_lastclose(struct drm_device * dev) i810_dma_cleanup(dev); } -void i810_driver_preclose(struct drm_device * dev, DRMFILE filp) +void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { if (dev->dev_private) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1372,9 +1360,10 @@ void i810_driver_preclose(struct drm_device * dev, DRMFILE filp) } } -void i810_driver_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) +void i810_driver_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv) { - i810_reclaim_buffers(dev, filp); + i810_reclaim_buffers(dev, file_priv); } int i810_driver_dma_quiescent(struct drm_device * dev) diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index 3627d774..c525e165 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -117,12 +117,13 @@ typedef struct drm_i810_private { /* i810_dma.c */ extern int i810_driver_dma_quiescent(struct drm_device * dev); extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, - struct file *filp); + struct drm_file *file_priv); extern int i810_driver_load(struct drm_device *, unsigned long flags); extern void i810_driver_lastclose(struct drm_device * dev); -extern void i810_driver_preclose(struct drm_device * dev, DRMFILE filp); +extern 
void i810_driver_preclose(struct drm_device * dev, + struct drm_file *file_priv); extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, - struct file *filp); + struct drm_file *file_priv); extern int i810_driver_device_is_agp(struct drm_device * dev); extern struct drm_ioctl_desc i810_ioctls[]; diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index f9c7a7e2..0e9ed65d 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -122,7 +122,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) return 0; } -static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, +static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv, unsigned long data, int pool) { drm_sis_private_t *dev_priv = dev->dev_private; @@ -144,7 +144,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file * priv, mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0, - (unsigned long)priv); + (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { @@ -190,7 +190,7 @@ static int sis_drm_free(DRM_IOCTL_ARGS) static int sis_fb_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - return sis_drm_alloc(dev, priv, data, VIDEO_TYPE); + return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE); } static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) @@ -225,7 +225,7 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) { DRM_DEVICE; - return sis_drm_alloc(dev, priv, data, AGP_TYPE); + return sis_drm_alloc(dev, file_priv, data, AGP_TYPE); } static drm_local_map_t *sis_reg_init(struct drm_device *dev) @@ -314,13 +314,13 @@ void sis_lastclose(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) +void sis_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv) { drm_sis_private_t *dev_priv = dev->dev_private; - struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); - if 
(drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { + if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) { mutex_unlock(&dev->struct_mutex); return; } @@ -329,7 +329,7 @@ void sis_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) dev->driver->dma_quiescent(dev); } - drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); + drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); return; } diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 5e73bd1a..10289a89 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -805,7 +805,7 @@ via_dma_blit_sync( DRM_IOCTL_ARGS ) err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); - if (-EINTR) == err + if (-EINTR == err) err = -EAGAIN; return err; diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index 7cb8651d..411c3d52 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -151,7 +151,7 @@ int via_mem_alloc(DRM_IOCTL_ARGS) tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0, - (unsigned long)priv); + (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { mem.offset = ((mem.type == VIA_MEM_VIDEO) ? 
@@ -190,13 +190,13 @@ int via_mem_free(DRM_IOCTL_ARGS) } -void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) +void via_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv) { drm_via_private_t *dev_priv = dev->dev_private; - struct drm_file *priv = filp->private_data; mutex_lock(&dev->struct_mutex); - if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { + if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) { mutex_unlock(&dev->struct_mutex); return; } @@ -205,7 +205,7 @@ void via_reclaim_buffers_locked(struct drm_device * dev, struct file *filp) dev->driver->dma_quiescent(dev); } - drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); + drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); return; } -- cgit v1.2.3 From 5b38e134163cc375e91424c4688cc9328c6e9082 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 19 Jul 2007 17:11:11 -0700 Subject: Replace DRM_IOCTL_ARGS with (dev, data, file_priv) and remove DRM_DEVICE. The data is now in kernel space, copied in/out as appropriate according to the This results in DRM_COPY_{TO,FROM}_USER going away, and error paths to deal with those failures. This also means that XFree86 4.2.0 support for i810 DRM is lost. 
--- linux-core/drmP.h | 202 +++++++++++++++++----------------- linux-core/drm_agpsupport.c | 105 +++++------------- linux-core/drm_auth.c | 36 +++---- linux-core/drm_bo.c | 181 +++++++++++++------------------ linux-core/drm_bufs.c | 161 ++++++++++----------------- linux-core/drm_context.c | 149 +++++++++---------------- linux-core/drm_drawable.c | 61 +++++------ linux-core/drm_drv.c | 223 ++++++++++++++++++++------------------ linux-core/drm_fence.c | 151 +++++++++++--------------- linux-core/drm_fops.c | 4 +- linux-core/drm_ioctl.c | 171 +++++++++++++---------------- linux-core/drm_irq.c | 87 +++++++-------- linux-core/drm_lock.c | 56 +++++----- linux-core/drm_memory_debug.c | 2 +- linux-core/drm_memory_debug.h | 2 +- linux-core/drm_objects.h | 57 +++++----- linux-core/drm_os_linux.h | 12 --- linux-core/drm_scatter.c | 37 ++----- linux-core/ffb_context.c | 22 ++-- linux-core/ffb_drv.c | 8 +- linux-core/ffb_drv.h | 2 +- linux-core/i810_dma.c | 246 +++++++++++++----------------------------- linux-core/i810_drm.h | 23 ---- linux-core/sis_mm.c | 90 +++++++--------- linux-core/via_dmablit.c | 22 ++-- linux-core/via_mm.c | 67 +++++------- 26 files changed, 868 insertions(+), 1309 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index f4367955..2b7e0a44 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -275,16 +275,19 @@ do { \ return -EFAULT; \ } +struct drm_device; +struct drm_file; + /** * Ioctl function type. * - * \param inode device inode. + * \param dev DRM device structure + * \param data pointer to kernel-space stored data, copied in and out according + * to ioctl description. * \param file_priv DRM file private pointer. - * \param cmd command. - * \param arg argument. 
*/ -typedef int drm_ioctl_t(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +typedef int drm_ioctl_t(struct drm_device *dev, void *data, + struct drm_file *file_priv); typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, unsigned long arg); @@ -294,9 +297,16 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, #define DRM_ROOT_ONLY 0x4 struct drm_ioctl_desc { + unsigned int cmd; drm_ioctl_t *func; int flags; }; +/** + * Creates a driver or general drm_ioctl_desc array entry for the given + * ioctl, for use by drm_ioctl(). + */ +#define DRM_IOCTL_DEF(ioctl, func, flags) \ + [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} struct drm_magic_entry { struct list_head head; @@ -602,7 +612,6 @@ struct ati_pcigart_info { * in this family */ -struct drm_device; struct drm_driver { int (*load) (struct drm_device *, unsigned long flags); int (*firstopen) (struct drm_device *); @@ -611,7 +620,7 @@ struct drm_driver { void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); - int (*dma_ioctl) (DRM_IOCTL_ARGS); + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); void (*dma_ready) (struct drm_device *); int (*dma_quiescent) (struct drm_device *); int (*context_ctor) (struct drm_device * dev, int context); @@ -944,71 +953,70 @@ extern void drm_init_memctl(size_t low_threshold, size_t unit_size); /* Misc. 
IOCTL support (drm_ioctl.h) */ -extern int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getunique(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_setunique(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getmap(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getclient(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getstats(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_setversion(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_noop(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getunique(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_setunique(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getmap(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getclient(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getstats(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_setversion(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Context IOCTL support (drm_context.h) */ -extern int drm_resctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_addctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_modctx(struct inode *inode, 
struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_switchctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_newctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_rmctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_resctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_addctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_modctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_switchctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_newctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_rmctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_ctxbitmap_init(struct drm_device *dev); extern void drm_ctxbitmap_cleanup(struct drm_device *dev); extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); -extern int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_setsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Drawable IOCTL support (drm_drawable.h) */ -extern int drm_adddraw(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_rmdraw(struct inode *inode, struct drm_file *file_priv, - 
unsigned int cmd, unsigned long arg); -extern int drm_update_drawable_info(struct inode *inode, - struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_adddraw(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_rmdraw(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_update_drawable_info(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id); extern void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.h) */ -extern int drm_getmagic(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_authmagic(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_getmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_authmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Locking IOCTL support (drm_lock.h) */ -extern int drm_lock(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_unlock(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_lock(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_unlock(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); extern void drm_idlelock_take(struct drm_lock_data *lock_data); @@ -1019,7 +1027,8 @@ extern void drm_idlelock_release(struct drm_lock_data *lock_data); * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. 
*/ -extern int drm_i_have_hw_lock(struct drm_file *file_priv); +extern int drm_i_have_hw_lock(struct drm_device *dev, + struct drm_file *file_priv); /* Buffer management support (drm_bufs.h) */ extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); @@ -1028,22 +1037,22 @@ extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request extern int drm_addmap(struct drm_device *dev, unsigned int offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t ** map_ptr); -extern int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_addmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); -extern int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_addbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_infobufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_markbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_freebufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); -extern int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_addbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_infobufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_markbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_freebufs(struct 
drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_mapbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_order(unsigned long size); extern unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource); @@ -1061,16 +1070,16 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev, struct drm_file *filp); /* IRQ support (drm_irq.h) */ -extern int drm_control(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_control(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); extern int drm_irq_uninstall(struct drm_device *dev); extern void drm_driver_irq_preinstall(struct drm_device *dev); extern void drm_driver_irq_postinstall(struct drm_device *dev); extern void drm_driver_irq_uninstall(struct drm_device *dev); -extern int drm_wait_vblank(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_wait_vblank(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern void drm_vbl_send_signals(struct drm_device *dev); extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); @@ -1078,32 +1087,29 @@ extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_de /* AGP/GART support (drm_agpsupport.h) */ extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); extern int drm_agp_acquire(struct drm_device *dev); -extern int drm_agp_acquire_ioctl(struct inode *inode, - struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_release(struct drm_device *dev); -extern int drm_agp_release_ioctl(struct inode *inode, - struct drm_file *file_priv, - unsigned int 
cmd, unsigned long arg); +extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); -extern int drm_agp_enable_ioctl(struct inode *inode, - struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); -extern int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); -extern int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_unbind_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); -extern int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) extern DRM_AGP_MEM 
*drm_agp_allocate_memory(size_t pages, u32 type); #else @@ -1137,11 +1143,11 @@ extern int drm_proc_cleanup(int minor, /* Scatter Gather Support (drm_scatter.h) */ extern void drm_sg_cleanup(struct drm_sg_mem * entry); -extern int drm_sg_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); -extern int drm_sg_free(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +extern int drm_sg_free(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* ATI PCIGART support (ati_pcigart.h) */ extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info); diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index ab7b8c90..df54360d 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -70,19 +70,16 @@ int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info) } EXPORT_SYMBOL(drm_agp_info); -int drm_agp_info_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = priv->head->dev; - struct drm_agp_info info; + struct drm_agp_info *info = data; int err; - err = drm_agp_info(dev, &info); + err = drm_agp_info(dev, info); if (err) return err; - - if (copy_to_user((struct drm_agp_info __user *) arg, &info, sizeof(info))) - return -EFAULT; + return 0; } @@ -130,8 +127,8 @@ EXPORT_SYMBOL(drm_agp_acquire); * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. 
*/ -int drm_agp_acquire_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { return drm_agp_acquire( (struct drm_device *) file_priv->head->dev ); } @@ -159,11 +156,9 @@ int drm_agp_release(struct drm_device *dev) } EXPORT_SYMBOL(drm_agp_release); -int drm_agp_release_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - return drm_agp_release(dev); } @@ -194,17 +189,12 @@ int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) } EXPORT_SYMBOL(drm_agp_enable); -int drm_agp_enable_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_mode mode; - - - if (copy_from_user(&mode, (struct drm_agp_mode __user *) arg, sizeof(mode))) - return -EFAULT; + struct drm_agp_mode *mode = data; - return drm_agp_enable(dev, mode); + return drm_agp_enable(dev, *mode); } /** @@ -254,34 +244,12 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) EXPORT_SYMBOL(drm_agp_alloc); -int drm_agp_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_buffer request; - struct drm_agp_buffer __user *argp = (void __user *)arg; - int err; - - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - - err = drm_agp_alloc(dev, &request); - if (err) - return err; - - if (copy_to_user(argp, &request, sizeof(request))) { - struct 
drm_agp_mem *entry; - list_for_each_entry(entry, &dev->agp->memory, head) { - if (entry->handle == request.handle) - break; - } - list_del(&entry->head); - drm_free_agp(entry->memory, entry->pages); - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); - return -EFAULT; - } + struct drm_agp_buffer *request = data; - return 0; + return drm_agp_alloc(dev, request); } /** @@ -336,17 +304,12 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) EXPORT_SYMBOL(drm_agp_unbind); -int drm_agp_unbind_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_binding request; - - if (copy_from_user - (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) - return -EFAULT; + struct drm_agp_binding *request = data; - return drm_agp_unbind(dev, &request); + return drm_agp_unbind(dev, request); } @@ -386,17 +349,12 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) EXPORT_SYMBOL(drm_agp_bind); -int drm_agp_bind_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_binding request; + struct drm_agp_binding *request = data; - if (copy_from_user - (&request, (struct drm_agp_binding __user *) arg, sizeof(request))) - return -EFAULT; - - return drm_agp_bind(dev, &request); + return drm_agp_bind(dev, request); } @@ -435,17 +393,12 @@ EXPORT_SYMBOL(drm_agp_free); -int drm_agp_free_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_agp_buffer 
request; - - if (copy_from_user - (&request, (struct drm_agp_buffer __user *) arg, sizeof(request))) - return -EFAULT; + struct drm_agp_buffer *request = data; - return drm_agp_free(dev, &request); + return drm_agp_free(dev, request); } diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c index f10a57b1..e35e8b6d 100644 --- a/linux-core/drm_auth.c +++ b/linux-core/drm_auth.c @@ -136,32 +136,29 @@ static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) * searches an unique non-zero magic number and add it associating it with \p * file_priv. */ -int drm_getmagic(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { static drm_magic_t sequence = 0; static DEFINE_SPINLOCK(lock); - struct drm_device *dev = file_priv->head->dev; - struct drm_auth auth; + struct drm_auth *auth = data; /* Find unique magic */ if (file_priv->magic) { - auth.magic = file_priv->magic; + auth->magic = file_priv->magic; } else { do { spin_lock(&lock); if (!sequence) ++sequence; /* reserve 0 */ - auth.magic = sequence++; + auth->magic = sequence++; spin_unlock(&lock); - } while (drm_find_file(dev, auth.magic)); - file_priv->magic = auth.magic; - drm_add_magic(dev, file_priv, auth.magic); + } while (drm_find_file(dev, auth->magic)); + file_priv->magic = auth->magic; + drm_add_magic(dev, file_priv, auth->magic); } - DRM_DEBUG("%u\n", auth.magic); - if (copy_to_user((struct drm_auth __user *) arg, &auth, sizeof(auth))) - return -EFAULT; + DRM_DEBUG("%u\n", auth->magic); + return 0; } @@ -176,19 +173,16 @@ int drm_getmagic(struct inode *inode, struct drm_file *file_priv, * * Checks if \p file_priv is associated with the magic number passed in \arg. 
*/ -int drm_authmagic(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_authmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_auth auth; + struct drm_auth *auth = data; struct drm_file *file; - if (copy_from_user(&auth, (struct drm_auth __user *) arg, sizeof(auth))) - return -EFAULT; - DRM_DEBUG("%u\n", auth.magic); - if ((file = drm_find_file(dev, auth.magic))) { + DRM_DEBUG("%u\n", auth->magic); + if ((file = drm_find_file(dev, auth->magic))) { file->authenticated = 1; - drm_remove_magic(dev, auth.magic); + drm_remove_magic(dev, auth->magic); return 0; } return -EINVAL; } diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 671c6232..75d89e46 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1701,13 +1701,14 @@ static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) return 0; } -int drm_bo_op_ioctl(DRM_IOCTL_ARGS) +int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_op_arg arg; - struct drm_bo_op_req *req = &arg.d.req; + struct drm_bo_op_arg curarg; + struct drm_bo_op_arg *arg = data; + struct drm_bo_op_req *req = &arg->d.req; struct drm_bo_info_rep rep; - unsigned long next; + unsigned long next = 0; + void __user *curuserarg = NULL; int ret; if (!dev->bm.initialized) { @@ -1716,10 +1717,16 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) } do { - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + if (next != 0) { + curuserarg = (void __user *)next; + if (copy_from_user(&curarg, curuserarg, + sizeof(curarg)) != 0) + return -EFAULT; + arg = &curarg; + } - if (arg.handled) { - data = arg.next; + if (arg->handled) { + next = arg->next; continue; } @@ -1747,7 +1754,7 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) default: ret = -EINVAL; } - next = arg.next; + next = arg->next; /* * A signal interrupted us. 
Make sure the ioctl is restartable. @@ -1756,21 +1763,23 @@ int drm_bo_op_ioctl(DRM_IOCTL_ARGS) if (ret == -EAGAIN) return -EAGAIN; - arg.handled = 1; - arg.d.rep.ret = ret; - arg.d.rep.bo_info = rep; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); - data = next; - } while (data); + arg->handled = 1; + arg->d.rep.ret = ret; + arg->d.rep.bo_info = rep; + if (arg != data) { + if (copy_to_user(curuserarg, &curarg, + sizeof(curarg)) != 0) + return -EFAULT; + } + } while (next != 0); return 0; } -int drm_bo_create_ioctl(DRM_IOCTL_ARGS) +int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_create_arg arg; - struct drm_bo_create_req *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_create_arg *arg = data; + struct drm_bo_create_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; struct drm_buffer_object *entry; int ret = 0; @@ -1779,8 +1788,6 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_lock_test(dev, file_priv); if (ret) goto out; @@ -1803,16 +1810,14 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) drm_bo_fill_rep_arg(entry, rep); mutex_unlock(&entry->mutex); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); out: return ret; } -int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) +int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_handle_arg arg; + struct drm_bo_handle_arg *arg = data; struct drm_user_object *uo; int ret = 0; @@ -1821,10 +1826,8 @@ int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(file_priv, arg.handle); + uo = drm_lookup_user_object(file_priv, arg->handle); if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) { 
mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -1835,52 +1838,44 @@ int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS) return ret; } -int drm_bo_map_ioctl(DRM_IOCTL_ARGS) +int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_map_wait_idle_arg arg; - struct drm_bo_info_req *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_buffer_object_map(file_priv, req->handle, req->mask, req->hint, rep); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS) +int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_handle_arg arg; + struct drm_bo_handle_arg *arg = data; int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - ret = drm_buffer_object_unmap(file_priv, arg.handle); + ret = drm_buffer_object_unmap(file_priv, arg->handle); return ret; } -int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) +int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_reference_info_arg arg; - struct drm_bo_handle_arg *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_reference_info_arg *arg = data; + struct drm_bo_handle_arg *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; struct drm_user_object *uo; int ret; @@ -1889,8 +1884,6 @@ int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void 
__user *)data, sizeof(arg)); - ret = drm_user_object_ref(file_priv, req->handle, drm_buffer_type, &uo); if (ret) @@ -1900,14 +1893,12 @@ int drm_bo_reference_ioctl(DRM_IOCTL_ARGS) if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS) +int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_handle_arg arg; + struct drm_bo_handle_arg *arg = data; int ret = 0; if (!dev->bm.initialized) { @@ -1915,18 +1906,15 @@ int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - ret = drm_user_object_unref(file_priv, arg.handle, drm_buffer_type); + ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type); return ret; } -int drm_bo_info_ioctl(DRM_IOCTL_ARGS) +int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_reference_info_arg arg; - struct drm_bo_handle_arg *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_reference_info_arg *arg = data; + struct drm_bo_handle_arg *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; int ret; if (!dev->bm.initialized) { @@ -1934,35 +1922,29 @@ int drm_bo_info_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_handle_info(file_priv, req->handle, rep); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); + return 0; } -int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) +int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_bo_map_wait_idle_arg arg; - struct drm_bo_info_req *req = &arg.d.req; - struct drm_bo_info_rep *rep = &arg.d.rep; + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct 
drm_bo_info_rep *rep = &arg->d.rep; int ret; if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_bo_handle_wait(file_priv, req->handle, req->hint, rep); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } @@ -2337,10 +2319,9 @@ int drm_bo_driver_init(struct drm_device * dev) EXPORT_SYMBOL(drm_bo_driver_init); -int drm_mm_init_ioctl(DRM_IOCTL_ARGS) +int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_mm_init_arg arg; + struct drm_mm_init_arg *arg = data; struct drm_buffer_manager *bm = &dev->bm; struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; @@ -2350,24 +2331,23 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); ret = -EINVAL; - if (arg.magic != DRM_BO_INIT_MAGIC) { + if (arg->magic != DRM_BO_INIT_MAGIC) { DRM_ERROR("You are using an old libdrm that is not compatible with\n" "\tthe kernel DRM module. Please upgrade your libdrm.\n"); return -EINVAL; } - if (arg.major != DRM_BO_INIT_MAJOR) { + if (arg->major != DRM_BO_INIT_MAJOR) { DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" "\tversion don't match. 
Got %d, expected %d,\n", - arg.major, DRM_BO_INIT_MAJOR); + arg->major, DRM_BO_INIT_MAJOR); return -EINVAL; } - if (arg.minor > DRM_BO_INIT_MINOR) { + if (arg->minor > DRM_BO_INIT_MINOR) { DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n" "\tlibdrm buffer object interface version is %d.%d.\n" "\tkernel DRM buffer object interface version is %d.%d\n", - arg.major, arg.minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); + arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); return -EINVAL; } @@ -2377,12 +2357,12 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) DRM_ERROR("DRM memory manager was not initialized.\n"); goto out; } - if (arg.mem_type == 0) { + if (arg->mem_type == 0) { DRM_ERROR("System memory buffers already initialized.\n"); goto out; } - ret = drm_bo_init_mm(dev, arg.mem_type, - arg.p_offset, arg.p_size); + ret = drm_bo_init_mm(dev, arg->mem_type, + arg->p_offset, arg->p_size); out: mutex_unlock(&dev->struct_mutex); @@ -2390,14 +2370,12 @@ out: if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) +int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_mm_type_arg arg; + struct drm_mm_type_arg *arg = data; struct drm_buffer_manager *bm = &dev->bm; struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; @@ -2407,8 +2385,6 @@ int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -2417,14 +2393,14 @@ int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) DRM_ERROR("DRM memory manager was not initialized\n"); goto out; } - if (arg.mem_type == 0) { + if (arg->mem_type == 0) { DRM_ERROR("No takedown for System memory buffers.\n"); goto out; } ret = 0; - if (drm_bo_clean_mm(dev, arg.mem_type)) { + if (drm_bo_clean_mm(dev, 
arg->mem_type)) { DRM_ERROR("Memory manager type %d not clean. " - "Delaying takedown\n", arg.mem_type); + "Delaying takedown\n", arg->mem_type); } out: mutex_unlock(&dev->struct_mutex); @@ -2432,14 +2408,12 @@ out: if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) +int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_mm_type_arg arg; + struct drm_mm_type_arg *arg = data; struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; @@ -2448,25 +2422,20 @@ int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_mm(dev, arg.mem_type); + ret = drm_bo_lock_mm(dev, arg->mem_type); mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->bm.init_mutex); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } -int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) +int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_mm_type_arg arg; struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; @@ -2475,7 +2444,6 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); LOCK_TEST_WITH_RETURN(dev, file_priv); mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -2486,7 +2454,6 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index a571b817..e8864df0 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -326,33 +326,24 @@ int drm_addmap(struct drm_device *dev, unsigned int offset, 
EXPORT_SYMBOL(drm_addmap); -int drm_addmap_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_addmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_map map; + struct drm_map *map = data; struct drm_map_list *maplist; - struct drm_map __user *argp = (void __user *)arg; int err; - if (copy_from_user(&map, argp, sizeof(map))) { - return -EFAULT; - } - - if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP)) + if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP)) return -EPERM; - err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags, - &maplist); + err = drm_addmap_core(dev, map->offset, map->size, map->type, + map->flags, &maplist); if (err) return err; - if (copy_to_user(argp, maplist->map, sizeof(struct drm_map))) - return -EFAULT; - /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ - if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle)) + if (put_user((void *)(unsigned long)maplist->user_token, &map->handle)) return -EFAULT; return 0; } @@ -451,23 +442,18 @@ EXPORT_SYMBOL(drm_rmmap); * gets used by drivers that the server doesn't need to care about. This seems * unlikely. 
*/ -int drm_rmmap_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_rmmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_map request; + struct drm_map *request = data; drm_local_map_t *map = NULL; struct drm_map_list *r_list; int ret; - if (copy_from_user(&request, (struct drm_map __user *) arg, sizeof(request))) { - return -EFAULT; - } - mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map && - r_list->user_token == (unsigned long)request.handle && + r_list->user_token == (unsigned long)request->handle && r_list->map->flags & _DRM_REMOVABLE) { map = r_list->map; break; @@ -1287,38 +1273,27 @@ EXPORT_SYMBOL(drm_addbufs_fb); * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent * PCI memory respectively. */ -int drm_addbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_addbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_buf_desc request; - struct drm_device *dev = file_priv->head->dev; + struct drm_buf_desc *request = data; int ret; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if (copy_from_user(&request, (struct drm_buf_desc __user *) arg, - sizeof(request))) - return -EFAULT; - #if __OS_HAS_AGP - if (request.flags & _DRM_AGP_BUFFER) - ret = drm_addbufs_agp(dev, &request); + if (request->flags & _DRM_AGP_BUFFER) + ret = drm_addbufs_agp(dev, request); else #endif - if (request.flags & _DRM_SG_BUFFER) - ret = drm_addbufs_sg(dev, &request); - else if (request.flags & _DRM_FB_BUFFER) - ret = drm_addbufs_fb(dev, &request); + if (request->flags & _DRM_SG_BUFFER) + ret = drm_addbufs_sg(dev, request); + else if (request->flags & _DRM_FB_BUFFER) + ret = drm_addbufs_fb(dev, request); else - ret = drm_addbufs_pci(dev, &request); + ret = drm_addbufs_pci(dev, 
request); - if (ret == 0) { - if (copy_to_user((void __user *) arg, &request, - sizeof(request))) { - ret = -EFAULT; - } - } return ret; } @@ -1339,13 +1314,11 @@ int drm_addbufs(struct inode *inode, struct drm_file *file_priv, * lock, preventing of allocating more buffers after this call. Information * about each requested buffer is then copied into user space. */ -int drm_infobufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_infobufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; - struct drm_buf_info request; - struct drm_buf_info __user *argp = (void __user *)arg; + struct drm_buf_info *request = data; int i; int count; @@ -1363,9 +1336,6 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv, ++dev->buf_use; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) ++count; @@ -1373,11 +1343,11 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv, DRM_DEBUG("count = %d\n", count); - if (request.count >= count) { + if (request->count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { struct drm_buf_desc __user *to = - &request.list[count]; + &request->list[count]; struct drm_buf_entry *from = &dma->bufs[i]; struct drm_freelist *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, @@ -1404,10 +1374,7 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv, } } } - request.count = count; - - if (copy_to_user(argp, &request, sizeof(request))) - return -EFAULT; + request->count = count; return 0; } @@ -1426,12 +1393,11 @@ int drm_infobufs(struct inode *inode, struct drm_file *file_priv, * * \note This ioctl is deprecated and mostly never used. 
*/ -int drm_markbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_markbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; - struct drm_buf_desc request; + struct drm_buf_desc *request = data; int order; struct drm_buf_entry *entry; @@ -1441,24 +1407,20 @@ int drm_markbufs(struct inode *inode, struct drm_file *file_priv, if (!dma) return -EINVAL; - if (copy_from_user(&request, - (struct drm_buf_desc __user *) arg, sizeof(request))) - return -EFAULT; - DRM_DEBUG("%d, %d, %d\n", - request.size, request.low_mark, request.high_mark); - order = drm_order(request.size); + request->size, request->low_mark, request->high_mark); + order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; entry = &dma->bufs[order]; - if (request.low_mark < 0 || request.low_mark > entry->buf_count) + if (request->low_mark < 0 || request->low_mark > entry->buf_count) return -EINVAL; - if (request.high_mark < 0 || request.high_mark > entry->buf_count) + if (request->high_mark < 0 || request->high_mark > entry->buf_count) return -EINVAL; - entry->freelist.low_mark = request.low_mark; - entry->freelist.high_mark = request.high_mark; + entry->freelist.low_mark = request->low_mark; + entry->freelist.high_mark = request->high_mark; return 0; } @@ -1475,12 +1437,11 @@ int drm_markbufs(struct inode *inode, struct drm_file *file_priv, * Calls free_buffer() for each used buffer. * This function is primarily used for debugging. 
*/ -int drm_freebufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_freebufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; - struct drm_buf_free request; + struct drm_buf_free *request = data; int i; int idx; struct drm_buf *buf; @@ -1491,13 +1452,9 @@ int drm_freebufs(struct inode *inode, struct drm_file *file_priv, if (!dma) return -EINVAL; - if (copy_from_user(&request, - (struct drm_buf_free __user *) arg, sizeof(request))) - return -EFAULT; - - DRM_DEBUG("%d\n", request.count); - for (i = 0; i < request.count; i++) { - if (copy_from_user(&idx, &request.list[i], sizeof(idx))) + DRM_DEBUG("%d\n", request->count); + for (i = 0; i < request->count; i++) { + if (copy_from_user(&idx, &request->list[i], sizeof(idx))) return -EFAULT; if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", @@ -1530,17 +1487,15 @@ int drm_freebufs(struct inode *inode, struct drm_file *file_priv, * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls * drm_mmap_dma(). 
*/ -int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_mapbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; - struct drm_buf_map __user *argp = (void __user *)arg; int retcode = 0; const int zero = 0; unsigned long virtual; unsigned long address; - struct drm_buf_map request; + struct drm_buf_map *request = data; int i; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1557,10 +1512,7 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, dev->buf_use++; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - - if (request.count >= dma->buf_count) { + if (request->count >= dma->buf_count) { if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) @@ -1591,28 +1543,28 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, retcode = (signed long)virtual; goto done; } - request.virtual = (void __user *)virtual; + request->virtual = (void __user *)virtual; for (i = 0; i < dma->buf_count; i++) { - if (copy_to_user(&request.list[i].idx, + if (copy_to_user(&request->list[i].idx, &dma->buflist[i]->idx, - sizeof(request.list[0].idx))) { + sizeof(request->list[0].idx))) { retcode = -EFAULT; goto done; } - if (copy_to_user(&request.list[i].total, + if (copy_to_user(&request->list[i].total, &dma->buflist[i]->total, - sizeof(request.list[0].total))) { + sizeof(request->list[0].total))) { retcode = -EFAULT; goto done; } - if (copy_to_user(&request.list[i].used, + if (copy_to_user(&request->list[i].used, &zero, sizeof(zero))) { retcode = -EFAULT; goto done; } address = virtual + dma->buflist[i]->offset; /* *** */ - if (copy_to_user(&request.list[i].address, + if (copy_to_user(&request->list[i].address, 
&address, sizeof(address))) { retcode = -EFAULT; goto done; @@ -1620,11 +1572,8 @@ int drm_mapbufs(struct inode *inode, struct drm_file *file_priv, } } done: - request.count = dma->buf_count; - DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); - - if (copy_to_user(argp, &request, sizeof(request))) - return -EFAULT; + request->count = dma->buf_count; + DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); return retcode; } diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 76e13f65..7854e89c 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -140,21 +140,16 @@ void drm_ctxbitmap_cleanup(struct drm_device *dev) * Gets the map from drm_device::ctx_idr with the handle specified and * returns its handle. */ -int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx_priv_map __user *argp = (void __user *)arg; - struct drm_ctx_priv_map request; + struct drm_ctx_priv_map *request = data; struct drm_map *map; struct drm_map_list *_entry; - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - mutex_lock(&dev->struct_mutex); - map = idr_find(&dev->ctx_idr, request.ctx_id); + map = idr_find(&dev->ctx_idr, request->ctx_id); if (!map) { mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -162,19 +157,17 @@ int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, mutex_unlock(&dev->struct_mutex); - request.handle = NULL; + request->handle = NULL; list_for_each_entry(_entry, &dev->maplist, head) { if (_entry->map == map) { - request.handle = + request->handle = (void *)(unsigned long)_entry->user_token; break; } } - if (request.handle == NULL) + if (request->handle == NULL) return -EINVAL; - if (copy_to_user(argp, &request, sizeof(request))) - return -EFAULT; return 0; } @@ 
-190,22 +183,17 @@ int drm_getsareactx(struct inode *inode, struct drm_file *file_priv, * Searches the mapping specified in \p arg and update the entry in * drm_device::ctx_idr with it. */ -int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_setsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx_priv_map request; + struct drm_ctx_priv_map *request = data; struct drm_map *map = NULL; struct drm_map_list *r_list = NULL; - if (copy_from_user(&request, - (struct drm_ctx_priv_map __user *) arg, sizeof(request))) - return -EFAULT; - mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map - && r_list->user_token == (unsigned long) request.handle) + && r_list->user_token == (unsigned long) request->handle) goto found; } bad: @@ -217,7 +205,7 @@ int drm_setsareactx(struct inode *inode, struct drm_file *file_priv, if (!map) goto bad; - if (IS_ERR(idr_replace(&dev->ctx_idr, map, request.ctx_id))) + if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) goto bad; mutex_unlock(&dev->struct_mutex); @@ -296,29 +284,23 @@ static int drm_context_switch_complete(struct drm_device *dev, int new) * \param arg user argument pointing to a drm_ctx_res structure. * \return zero on success or a negative number on failure. 
*/ -int drm_resctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_resctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_ctx_res res; - struct drm_ctx_res __user *argp = (void __user *)arg; + struct drm_ctx_res *res = data; struct drm_ctx ctx; int i; - if (copy_from_user(&res, argp, sizeof(res))) - return -EFAULT; - - if (res.count >= DRM_RESERVED_CONTEXTS) { + if (res->count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; - if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx))) + if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) return -EFAULT; } } - res.count = DRM_RESERVED_CONTEXTS; + res->count = DRM_RESERVED_CONTEXTS; - if (copy_to_user(argp, &res, sizeof(res))) - return -EFAULT; return 0; } @@ -333,32 +315,27 @@ int drm_resctx(struct inode *inode, struct drm_file *file_priv, * * Get a new handle for the context and copy to userspace. */ -int drm_addctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_addctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_ctx_list *ctx_entry; - struct drm_ctx __user *argp = (void __user *)arg; - struct drm_ctx ctx; + struct drm_ctx *ctx = data; - if (copy_from_user(&ctx, argp, sizeof(ctx))) - return -EFAULT; - - ctx.handle = drm_ctxbitmap_next(dev); - if (ctx.handle == DRM_KERNEL_CONTEXT) { + ctx->handle = drm_ctxbitmap_next(dev); + if (ctx->handle == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. */ - ctx.handle = drm_ctxbitmap_next(dev); + ctx->handle = drm_ctxbitmap_next(dev); } - DRM_DEBUG("%d\n", ctx.handle); - if (ctx.handle == -1) { + DRM_DEBUG("%d\n", ctx->handle); + if (ctx->handle == -1) { DRM_DEBUG("Not enough free contexts.\n"); /* Should this return -EBUSY instead? 
*/ return -ENOMEM; } - if (ctx.handle != DRM_KERNEL_CONTEXT) { + if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_ctor) - if (!dev->driver->context_ctor(dev, ctx.handle)) { + if (!dev->driver->context_ctor(dev, ctx->handle)) { DRM_DEBUG("Running out of ctxs or memory.\n"); return -ENOMEM; } @@ -371,7 +348,7 @@ int drm_addctx(struct inode *inode, struct drm_file *file_priv, } INIT_LIST_HEAD(&ctx_entry->head); - ctx_entry->handle = ctx.handle; + ctx_entry->handle = ctx->handle; ctx_entry->tag = file_priv; mutex_lock(&dev->ctxlist_mutex); @@ -379,13 +356,10 @@ int drm_addctx(struct inode *inode, struct drm_file *file_priv, ++dev->ctx_count; mutex_unlock(&dev->ctxlist_mutex); - if (copy_to_user(argp, &ctx, sizeof(ctx))) - return -EFAULT; return 0; } -int drm_modctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { /* This does nothing */ return 0; @@ -400,20 +374,13 @@ int drm_modctx(struct inode *inode, struct drm_file *file_priv, * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. */ -int drm_getctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_ctx __user *argp = (void __user *)arg; - struct drm_ctx ctx; - - if (copy_from_user(&ctx, argp, sizeof(ctx))) - return -EFAULT; + struct drm_ctx *ctx = data; /* This is 0, because we don't handle any context flags */ - ctx.flags = 0; + ctx->flags = 0; - if (copy_to_user(argp, &ctx, sizeof(ctx))) - return -EFAULT; return 0; } @@ -428,17 +395,13 @@ int drm_getctx(struct inode *inode, struct drm_file *file_priv, * * Calls context_switch(). 
*/ -int drm_switchctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_switchctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx ctx; - - if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) - return -EFAULT; + struct drm_ctx *ctx = data; - DRM_DEBUG("%d\n", ctx.handle); - return drm_context_switch(dev, dev->last_context, ctx.handle); + DRM_DEBUG("%d\n", ctx->handle); + return drm_context_switch(dev, dev->last_context, ctx->handle); } /** @@ -452,17 +415,13 @@ int drm_switchctx(struct inode *inode, struct drm_file *file_priv, * * Calls context_switch_complete(). */ -int drm_newctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_newctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx ctx; + struct drm_ctx *ctx = data; - if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) - return -EFAULT; - - DRM_DEBUG("%d\n", ctx.handle); - drm_context_switch_complete(dev, ctx.handle); + DRM_DEBUG("%d\n", ctx->handle); + drm_context_switch_complete(dev, ctx->handle); return 0; } @@ -478,23 +437,19 @@ int drm_newctx(struct inode *inode, struct drm_file *file_priv, * * If not the special kernel context, calls ctxbitmap_free() to free the specified context. 
*/ -int drm_rmctx(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_rmctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_ctx ctx; - - if (copy_from_user(&ctx, (struct drm_ctx __user *) arg, sizeof(ctx))) - return -EFAULT; + struct drm_ctx *ctx = data; - DRM_DEBUG("%d\n", ctx.handle); - if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { + DRM_DEBUG("%d\n", ctx->handle); + if (ctx->handle == DRM_KERNEL_CONTEXT + 1) { file_priv->remove_auth_on_close = 1; } - if (ctx.handle != DRM_KERNEL_CONTEXT) { + if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, ctx.handle); - drm_ctxbitmap_free(dev, ctx.handle); + dev->driver->context_dtor(dev, ctx->handle); + drm_ctxbitmap_free(dev, ctx->handle); } mutex_lock(&dev->ctxlist_mutex); @@ -502,7 +457,7 @@ int drm_rmctx(struct inode *inode, struct drm_file *file_priv, struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->handle == ctx.handle) { + if (pos->handle == ctx->handle) { list_del(&pos->head); drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); --dev->ctx_count; diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index 2787c9a3..1839c576 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -40,11 +40,10 @@ /** * Allocate drawable ID and memory to store information about it. 
*/ -int drm_adddraw(DRM_IOCTL_ARGS) +int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; unsigned long irqflags; - struct drm_draw draw; + struct drm_draw *draw = data; int new_id = 0; int ret; @@ -63,11 +62,9 @@ again: spin_unlock_irqrestore(&dev->drw_lock, irqflags); - draw.handle = new_id; + draw->handle = new_id; - DRM_DEBUG("%d\n", draw.handle); - - DRM_COPY_TO_USER_IOCTL((struct drm_draw __user *)data, draw, sizeof(draw)); + DRM_DEBUG("%d\n", draw->handle); return 0; } @@ -75,69 +72,61 @@ again: /** * Free drawable ID and memory to store information about it. */ -int drm_rmdraw(DRM_IOCTL_ARGS) +int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_draw draw; + struct drm_draw *draw = data; unsigned long irqflags; - DRM_COPY_FROM_USER_IOCTL(draw, (struct drm_draw __user *) data, - sizeof(draw)); - spin_lock_irqsave(&dev->drw_lock, irqflags); - drm_free(drm_get_drawable_info(dev, draw.handle), + drm_free(drm_get_drawable_info(dev, draw->handle), sizeof(struct drm_drawable_info), DRM_MEM_BUFS); - idr_remove(&dev->drw_idr, draw.handle); + idr_remove(&dev->drw_idr, draw->handle); spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_DEBUG("%d\n", draw.handle); + DRM_DEBUG("%d\n", draw->handle); return 0; } -int drm_update_drawable_info(DRM_IOCTL_ARGS) +int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_update_draw update; + struct drm_update_draw *update = data; unsigned long irqflags; struct drm_clip_rect *rects; struct drm_drawable_info *info; int err; - DRM_COPY_FROM_USER_IOCTL(update, (struct drm_update_draw __user *) data, - sizeof(update)); - - info = idr_find(&dev->drw_idr, update.handle); + info = idr_find(&dev->drw_idr, update->handle); if (!info) { info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); if (!info) return -ENOMEM; - if (IS_ERR(idr_replace(&dev->drw_idr, info, update.handle))) 
{ - DRM_ERROR("No such drawable %d\n", update.handle); + if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { + DRM_ERROR("No such drawable %d\n", update->handle); drm_free(info, sizeof(*info), DRM_MEM_BUFS); return -EINVAL; } } - switch (update.type) { + switch (update->type) { case DRM_DRAWABLE_CLIPRECTS: - if (update.num != info->num_rects) { - rects = drm_alloc(update.num * sizeof(struct drm_clip_rect), + if (update->num != info->num_rects) { + rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } else rects = info->rects; - if (update.num && !rects) { + if (update->num && !rects) { DRM_ERROR("Failed to allocate cliprect memory\n"); err = -ENOMEM; goto error; } - if (update.num && DRM_COPY_FROM_USER(rects, + if (update->num && DRM_COPY_FROM_USER(rects, (struct drm_clip_rect __user *) - (unsigned long)update.data, - update.num * + (unsigned long)update->data, + update->num * sizeof(*rects))) { DRM_ERROR("Failed to copy cliprects from userspace\n"); err = -EFAULT; @@ -152,15 +141,15 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) } info->rects = rects; - info->num_rects = update.num; + info->num_rects = update->num; spin_unlock_irqrestore(&dev->drw_lock, irqflags); DRM_DEBUG("Updated %d cliprects for drawable %d\n", - info->num_rects, update.handle); + info->num_rects, update->handle); break; default: - DRM_ERROR("Invalid update type %d\n", update.type); + DRM_ERROR("Invalid update type %d\n", update->type); return -EINVAL; } @@ -168,7 +157,7 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) error: if (rects != info->rects) - drm_free(rects, update.num * sizeof(struct drm_clip_rect), + drm_free(rects, update->num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); return err; diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 92b07729..85e3ba47 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -51,109 +51,102 @@ static void drm_cleanup(struct drm_device * dev); int drm_fb_loaded = 0; -static int 
drm_version(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg); +static int drm_version(struct drm_device *dev, void *data, + struct drm_file *file_priv); /** Ioctl table */ static struct drm_ioctl_desc drm_ioctls[] = { - [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY}, - - [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = 
{drm_resctx, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), + DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), + 
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), /* The DRM_IOCTL_DMA ioctl should be defined by the driver. 
*/ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #if __OS_HAS_AGP - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #endif - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + 
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), - // [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, + // DRM_IOCTL_DEF(DRM_IOCTL_BUFOBJ, drm_bo_ioctl, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, - DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_MM_TAKEDOWN)] = {drm_mm_takedown_ioctl, - DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_MM_LOCK)] = {drm_mm_lock_ioctl, - DRM_AUTH }, - [DRM_IOCTL_NR(DRM_IOCTL_MM_UNLOCK)] = {drm_mm_unlock_ioctl, - DRM_AUTH }, - - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_CREATE)] = {drm_fence_create_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_DESTROY)] = {drm_fence_destroy_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_REFERENCE)] = {drm_fence_reference_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_UNREFERENCE)] = {drm_fence_unreference_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_SIGNALED)] = {drm_fence_signaled_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_FLUSH)] = {drm_fence_flush_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_WAIT)] = {drm_fence_wait_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_EMIT)] = {drm_fence_emit_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE_BUFFERS)] = {drm_fence_buffers_ioctl, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_BO_CREATE)] = {drm_bo_create_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_DESTROY)] = {drm_bo_destroy_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_MAP)] = {drm_bo_map_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_UNMAP)] = {drm_bo_unmap_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_REFERENCE)] = 
{drm_bo_reference_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_UNREFERENCE)] = {drm_bo_unreference_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_OP)] = {drm_bo_op_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_INFO)] = {drm_bo_info_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BO_WAIT_IDLE)] = {drm_bo_wait_idle_ioctl, DRM_AUTH}, - + DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_DESTROY, drm_fence_destroy_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_DESTROY, drm_bo_destroy_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) @@ -545,26 +538,19 @@ 
module_exit(drm_core_exit); * * Fills in the version information in \p arg. */ -static int drm_version(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int drm_version(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_version __user *argp = (void __user *)arg; - struct drm_version version; + struct drm_version *version = data; int len; - if (copy_from_user(&version, argp, sizeof(version))) - return -EFAULT; - - version.version_major = dev->driver->major; - version.version_minor = dev->driver->minor; - version.version_patchlevel = dev->driver->patchlevel; - DRM_COPY(version.name, dev->driver->name); - DRM_COPY(version.date, dev->driver->date); - DRM_COPY(version.desc, dev->driver->desc); + version->version_major = dev->driver->major; + version->version_minor = dev->driver->minor; + version->version_patchlevel = dev->driver->patchlevel; + DRM_COPY(version->name, dev->driver->name); + DRM_COPY(version->date, dev->driver->date); + DRM_COPY(version->desc, dev->driver->desc); - if (copy_to_user(argp, &version, sizeof(version))) - return -EFAULT; return 0; } @@ -579,6 +565,11 @@ static int drm_version(struct inode *inode, struct drm_file *file_priv, * * Looks up the ioctl function in the ::ioctls table, checking for root * previleges if so required, and dispatches to the respective function. + * + * Copies data in and out according to the size and direction given in cmd, + * which must match the ioctl cmd known by the kernel. The kernel uses a 512 + * byte stack buffer to store the ioctl arguments in kernel space. Should we + * ever need much larger ioctl arguments, we may need to allocate memory. 
*/ int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) @@ -589,6 +580,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; + char kdata[512]; atomic_inc(&dev->ioctl_count); atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); @@ -606,14 +598,28 @@ int drm_ioctl(struct inode *inode, struct file *filp, ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) ioctl = &drm_ioctls[nr]; - else + else { + retcode = -EINVAL; + goto err_i1; + } + + if (ioctl->cmd != cmd) { + retcode = -EINVAL; goto err_i1; + } func = ioctl->func; /* is there a local override? */ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) func = dev->driver->dma_ioctl; + if (cmd & IOC_IN) { + if (copy_from_user(kdata, (void __user *)arg, + _IOC_SIZE(cmd)) != 0) { + retcode = -EACCES; + goto err_i1; + } + } + if (!func) { DRM_DEBUG("no function\n"); retcode = -EINVAL; @@ -622,8 +628,15 @@ int drm_ioctl(struct inode *inode, struct file *filp, ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { retcode = -EACCES; } else { - retcode = func(inode, file_priv, cmd, arg); + retcode = func(dev, kdata, file_priv); + } + + if (cmd & IOC_OUT) { + if (copy_to_user((void __user *)arg, kdata, + _IOC_SIZE(cmd)) != 0) + retcode = -EACCES; } + err_i1: atomic_dec(&dev->ioctl_count); if (retcode) diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 3a3035e1..c4f7da15 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -565,12 +565,11 @@ struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_ return fence; } -int drm_fence_create_ioctl(DRM_IOCTL_ARGS) +int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct 
drm_fence_object *fence; unsigned long flags; ret = 0; @@ -580,15 +579,14 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - if (arg.flags & DRM_FENCE_FLAG_EMIT) + if (arg->flags & DRM_FENCE_FLAG_EMIT) LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_object_create(dev, arg.class, - arg.type, arg.flags, &fence); + ret = drm_fence_object_create(dev, arg->class, + arg->type, arg->flags, &fence); if (ret) return ret; ret = drm_fence_add_user_object(file_priv, fence, - arg.flags & + arg->flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) { drm_fence_usage_deref_unlocked(&fence); @@ -600,25 +598,23 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) */ atomic_inc(&fence->usage); - arg.handle = fence->base.hash.key; + arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) +int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_user_object *uo; ret = 0; @@ -627,10 +623,8 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(file_priv, arg.handle); + uo = drm_lookup_user_object(file_priv, arg->handle); if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) { mutex_unlock(&dev->struct_mutex); return -EINVAL; @@ -641,12 +635,11 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) } 
-int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) +int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; struct drm_user_object *uo; unsigned long flags; @@ -657,30 +650,27 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - ret = drm_user_object_ref(file_priv, arg.handle, drm_fence_type, &uo); + ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo); if (ret) return ret; - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) +int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; ret = 0; if (!fm->initialized) { @@ -688,16 +678,14 @@ int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - return drm_user_object_unref(file_priv, arg.handle, drm_fence_type); + return drm_user_object_unref(file_priv, arg->handle, drm_fence_type); } -int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) +int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct 
drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -707,29 +695,25 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) +int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -739,31 +723,27 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; - ret = drm_fence_object_flush(fence, arg.type); + ret = drm_fence_object_flush(fence, arg->type); read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) +int drm_fence_wait_ioctl(struct drm_device *dev, void *data, 
struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -773,33 +753,29 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; ret = drm_fence_object_wait(fence, - arg.flags & DRM_FENCE_FLAG_WAIT_LAZY, - 0, arg.type); + arg->flags & DRM_FENCE_FLAG_WAIT_LAZY, + 0, arg->type); read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) +int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -809,32 +785,28 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - LOCK_TEST_WITH_RETURN(dev, file_priv); - fence = drm_lookup_fence_object(file_priv, arg.handle); + fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; - ret = drm_fence_object_emit(fence, arg.flags, arg.class, - arg.type); + ret = drm_fence_object_emit(fence, arg->flags, arg->class, + arg->type); read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; 
+ arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } -int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) +int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg arg; + struct drm_fence_arg *arg = data; struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -844,32 +816,29 @@ int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized\n"); return -EINVAL; } LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_buffer_objects(file_priv, NULL, arg.flags, + ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags, NULL, &fence); if (ret) return ret; ret = drm_fence_add_user_object(file_priv, fence, - arg.flags & + arg->flags & DRM_FENCE_FLAG_SHAREABLE); if (ret) return ret; atomic_inc(&fence->usage); - arg.handle = fence->base.hash.key; + arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 0162f113..251ee5b5 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -407,7 +407,7 @@ int drm_release(struct inode *inode, struct file *filp) dev->open_count); if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { - if (drm_i_have_hw_lock(file_priv)) { + if (drm_i_have_hw_lock(dev, file_priv)) 
{ dev->driver->reclaim_buffers_locked(dev, file_priv); } else { unsigned long _end=jiffies + 3*DRM_HZ; @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp) } - if (drm_i_have_hw_lock(file_priv)) { + if (drm_i_have_hw_lock(dev, file_priv)) { DRM_DEBUG("File %p released, freeing lock for context %d\n", filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 6f0ef149..717e23c0 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -49,22 +49,17 @@ * * Copies the bus id from drm_device::unique into user space. */ -int drm_getunique(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getunique(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_unique __user *argp = (void __user *)arg; - struct drm_unique u; + struct drm_unique *u = data; - if (copy_from_user(&u, argp, sizeof(u))) - return -EFAULT; - if (u.unique_len >= dev->unique_len) { - if (copy_to_user(u.unique, dev->unique, dev->unique_len)) + if (u->unique_len >= dev->unique_len) { + if (copy_to_user(u->unique, dev->unique, dev->unique_len)) return -EFAULT; } - u.unique_len = dev->unique_len; - if (copy_to_user(argp, &u, sizeof(u))) - return -EFAULT; + u->unique_len = dev->unique_len; + return 0; } @@ -82,27 +77,23 @@ int drm_getunique(struct inode *inode, struct drm_file *file_priv, * in interface version 1.1 and will return EBUSY when setversion has requested * version 1.1 or greater. 
*/ -int drm_setunique(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_setunique(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_unique u; + struct drm_unique *u = data; int domain, bus, slot, func, ret; if (dev->unique_len || dev->unique) return -EBUSY; - if (copy_from_user(&u, (struct drm_unique __user *) arg, sizeof(u))) - return -EFAULT; - - if (!u.unique_len || u.unique_len > 1024) + if (!u->unique_len || u->unique_len > 1024) return -EINVAL; - dev->unique_len = u.unique_len; - dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER); + dev->unique_len = u->unique_len; + dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); if (!dev->unique) return -ENOMEM; - if (copy_from_user(dev->unique, u.unique, dev->unique_len)) + if (copy_from_user(dev->unique, u->unique, dev->unique_len)) return -EFAULT; dev->unique[dev->unique_len] = '\0'; @@ -174,20 +165,16 @@ static int drm_set_busid(struct drm_device * dev) * Searches for the mapping with the specified offset and copies its information * into userspace */ -int drm_getmap(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getmap(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_map __user *argp = (void __user *)arg; - struct drm_map map; + struct drm_map *map = data; struct drm_map_list *r_list = NULL; struct list_head *list; int idx; int i; - if (copy_from_user(&map, argp, sizeof(map))) - return -EFAULT; - idx = map.offset; + idx = map->offset; mutex_lock(&dev->struct_mutex); if (idx < 0) { @@ -208,16 +195,14 @@ int drm_getmap(struct inode *inode, struct drm_file *file_priv, return -EINVAL; } - map.offset = r_list->map->offset; - map.size = r_list->map->size; - map.type = r_list->map->type; - map.flags = r_list->map->flags; - map.handle = (void *)(unsigned 
long) r_list->user_token; - map.mtrr = r_list->map->mtrr; + map->offset = r_list->map->offset; + map->size = r_list->map->size; + map->type = r_list->map->type; + map->flags = r_list->map->flags; + map->handle = (void *)(unsigned long) r_list->user_token; + map->mtrr = r_list->map->mtrr; mutex_unlock(&dev->struct_mutex); - if (copy_to_user(argp, &map, sizeof(map))) - return -EFAULT; return 0; } @@ -234,19 +219,15 @@ int drm_getmap(struct inode *inode, struct drm_file *file_priv, * Searches for the client with the specified index and copies its information * into userspace */ -int drm_getclient(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getclient(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_client __user *argp = (struct drm_client __user *)arg; - struct drm_client client; + struct drm_client *client = data; struct drm_file *pt; int idx; int i; - if (copy_from_user(&client, argp, sizeof(client))) - return -EFAULT; - idx = client.idx; + idx = client->idx; mutex_lock(&dev->struct_mutex); if (list_empty(&dev->filelist)) { @@ -260,15 +241,13 @@ int drm_getclient(struct inode *inode, struct drm_file *file_priv, break; } - client.auth = pt->authenticated; - client.pid = pt->pid; - client.uid = pt->uid; - client.magic = pt->magic; - client.iocs = pt->ioctl_count; + client->auth = pt->authenticated; + client->pid = pt->pid; + client->uid = pt->uid; + client->magic = pt->magic; + client->iocs = pt->ioctl_count; mutex_unlock(&dev->struct_mutex); - if (copy_to_user(argp, &client, sizeof(client))) - return -EFAULT; return 0; } @@ -282,32 +261,29 @@ int drm_getclient(struct inode *inode, struct drm_file *file_priv, * * \return zero on success or a negative number on failure. 
*/ -int drm_getstats(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_getstats(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_stats stats; + struct drm_stats *stats = data; int i; - memset(&stats, 0, sizeof(stats)); + memset(stats, 0, sizeof(*stats)); mutex_lock(&dev->struct_mutex); for (i = 0; i < dev->counters; i++) { if (dev->types[i] == _DRM_STAT_LOCK) - stats.data[i].value - = (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); + stats->data[i].value = + (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); else - stats.data[i].value = atomic_read(&dev->counts[i]); - stats.data[i].type = dev->types[i]; + stats->data[i].value = atomic_read(&dev->counts[i]); + stats->data[i].type = dev->types[i]; } - stats.count = dev->counters; + stats->count = dev->counters; mutex_unlock(&dev->struct_mutex); - if (copy_to_user((struct drm_stats __user *) arg, &stats, sizeof(stats))) - return -EFAULT; return 0; } @@ -322,32 +298,21 @@ int drm_getstats(struct inode *inode, struct drm_file *file_priv, * * Sets the requested interface version */ -int drm_setversion(DRM_IOCTL_ARGS) +int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - struct drm_set_version sv; - struct drm_set_version retv; - int if_version; - struct drm_set_version __user *argp = (void __user *)data; - - if (copy_from_user(&sv, argp, sizeof(sv))) - return -EFAULT; - - retv.drm_di_major = DRM_IF_MAJOR; - retv.drm_di_minor = DRM_IF_MINOR; - retv.drm_dd_major = dev->driver->major; - retv.drm_dd_minor = dev->driver->minor; - - if (copy_to_user(argp, &retv, sizeof(retv))) - return -EFAULT; - - if (sv.drm_di_major != -1) { - if (sv.drm_di_major != DRM_IF_MAJOR || - sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) - return -EINVAL; - if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor); + struct drm_set_version *sv = data; + int 
if_version, retcode = 0; + + if (sv->drm_di_major != -1) { + if (sv->drm_di_major != DRM_IF_MAJOR || + sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { + retcode = -EINVAL; + goto done; + } + if_version = DRM_IF_VERSION(sv->drm_di_major, + sv->drm_di_minor); dev->if_version = max(if_version, dev->if_version); - if (sv.drm_di_minor >= 1) { + if (sv->drm_di_minor >= 1) { /* * Version 1.1 includes tying of DRM to specific device */ @@ -355,20 +320,30 @@ int drm_setversion(DRM_IOCTL_ARGS) } } - if (sv.drm_dd_major != -1) { - if (sv.drm_dd_major != dev->driver->major || - sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver->minor) - return -EINVAL; + if (sv->drm_dd_major != -1) { + if (sv->drm_dd_major != dev->driver->major || + sv->drm_dd_minor < 0 || sv->drm_dd_minor > + dev->driver->minor) { + retcode = -EINVAL; + goto done; + } if (dev->driver->set_version) - dev->driver->set_version(dev, &sv); + dev->driver->set_version(dev, sv); } - return 0; + +done: + sv->drm_di_major = DRM_IF_MAJOR; + sv->drm_di_minor = DRM_IF_MINOR; + sv->drm_dd_major = dev->driver->major; + sv->drm_dd_minor = dev->driver->minor; + + return retcode; } /** No-op ioctl. */ -int drm_noop(struct inode *inode, struct drm_file *file_priv, unsigned int cmd, - unsigned long arg) +int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv) { DRM_DEBUG("\n"); return 0; } diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 36df557b..fe4316e0 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -50,29 +50,24 @@ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal * to that of the device that this DRM instance attached to. 
*/ -int drm_irq_by_busid(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_irq_busid __user *argp = (void __user *)arg; - struct drm_irq_busid p; + struct drm_irq_busid *p = data; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; - if (copy_from_user(&p, argp, sizeof(p))) - return -EFAULT; - - if ((p.busnum >> 8) != drm_get_pci_domain(dev) || - (p.busnum & 0xff) != dev->pdev->bus->number || - p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn)) + if ((p->busnum >> 8) != drm_get_pci_domain(dev) || + (p->busnum & 0xff) != dev->pdev->bus->number || + p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) return -EINVAL; - p.irq = dev->irq; + p->irq = dev->irq; + + DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, + p->irq); - DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq); - if (copy_to_user(argp, &p, sizeof(p))) - return -EFAULT; return 0; } @@ -191,23 +186,20 @@ EXPORT_SYMBOL(drm_irq_uninstall); * * Calls irq_install() or irq_uninstall() according to \p arg. 
*/ -int drm_control(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_control(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_control ctl; + struct drm_control *ctl = data; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ - if (copy_from_user(&ctl, (struct drm_control __user *) arg, sizeof(ctl))) - return -EFAULT; - switch (ctl.func) { + switch (ctl->func) { case DRM_INST_HANDLER: if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl.irq != dev->irq) + ctl->irq != dev->irq) return -EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: @@ -238,11 +230,9 @@ int drm_control(struct inode *inode, struct drm_file *file_priv, * * If a signal is not requested, then calls vblank_wait(). */ -int drm_wait_vblank(DRM_IOCTL_ARGS) +int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - union drm_wait_vblank __user *argp = (void __user *)data; - union drm_wait_vblank vblwait; + union drm_wait_vblank *vblwait = data; struct timeval now; int ret = 0; unsigned int flags, seq; @@ -250,18 +240,15 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) if ((!dev->irq) || (!dev->irq_enabled)) return -EINVAL; - if (copy_from_user(&vblwait, argp, sizeof(vblwait))) - return -EFAULT; - - if (vblwait.request.type & + if (vblwait->request.type & ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", - vblwait.request.type, + vblwait->request.type, (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); return -EINVAL; } - flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? 
DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) @@ -270,10 +257,10 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2 : &dev->vbl_received); - switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) { + switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: - vblwait.request.sequence += seq; - vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; + vblwait->request.sequence += seq; + vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; case _DRM_VBLANK_ABSOLUTE: break; default: @@ -281,8 +268,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) } if ((flags & _DRM_VBLANK_NEXTONMISS) && - (seq - vblwait.request.sequence) <= (1<<23)) { - vblwait.request.sequence = seq + 1; + (seq - vblwait->request.sequence) <= (1<<23)) { + vblwait->request.sequence = seq + 1; } if (flags & _DRM_VBLANK_SIGNAL) { @@ -298,12 +285,13 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) * that case */ list_for_each_entry(vbl_sig, vbl_sigs, head) { - if (vbl_sig->sequence == vblwait.request.sequence - && vbl_sig->info.si_signo == vblwait.request.signal + if (vbl_sig->sequence == vblwait->request.sequence + && vbl_sig->info.si_signo == + vblwait->request.signal && vbl_sig->task == current) { spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - vblwait.reply.sequence = seq; + vblwait->reply.sequence = seq; goto done; } } @@ -325,8 +313,8 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); - vbl_sig->sequence = vblwait.request.sequence; - vbl_sig->info.si_signo = vblwait.request.signal; + vbl_sig->sequence = vblwait->request.sequence; + vbl_sig->info.si_signo = vblwait->request.signal; vbl_sig->task = current; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -335,25 +323,22 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - vblwait.reply.sequence = seq; + vblwait->reply.sequence = seq; } else { if (flags & _DRM_VBLANK_SECONDARY) { if (dev->driver->vblank_wait2) - ret = 
dev->driver->vblank_wait2(dev, &vblwait.request.sequence); + ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence); } else if (dev->driver->vblank_wait) ret = dev->driver->vblank_wait(dev, - &vblwait.request.sequence); + &vblwait->request.sequence); do_gettimeofday(&now); - vblwait.reply.tval_sec = now.tv_sec; - vblwait.reply.tval_usec = now.tv_usec; + vblwait->reply.tval_sec = now.tv_sec; + vblwait->reply.tval_usec = now.tv_usec; } done: - if (copy_to_user(argp, &vblwait, sizeof(vblwait))) - return -EFAULT; - return ret; } diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index 54e34e14..b8e4a5d9 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -48,31 +48,26 @@ static int drm_notifier(void *priv); * * Add the current task to the lock wait queue, and attempt to take to lock. */ -int drm_lock(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; DECLARE_WAITQUEUE(entry, current); - struct drm_lock lock; + struct drm_lock *lock = data; int ret = 0; ++file_priv->lock_count; - if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) - return -EFAULT; - - if (lock.context == DRM_KERNEL_CONTEXT) { + if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", - current->pid, lock.context); + current->pid, lock->context); return -EINVAL; } DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", - lock.context, current->pid, - dev->lock.hw_lock->lock, lock.flags); + lock->context, current->pid, + dev->lock.hw_lock->lock, lock->flags); if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) - if (lock.context < 0) + if (lock->context < 0) return -EINVAL; add_wait_queue(&dev->lock.lock_queue, &entry); @@ -86,7 +81,7 @@ int drm_lock(struct inode *inode, struct drm_file *file_priv, ret = -EINTR; break; } - if 
(drm_lock_take(&dev->lock, lock.context)) { + if (drm_lock_take(&dev->lock, lock->context)) { dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); @@ -106,7 +101,8 @@ int drm_lock(struct inode *inode, struct drm_file *file_priv, __set_current_state(TASK_RUNNING); remove_wait_queue(&dev->lock.lock_queue, &entry); - DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" ); + DRM_DEBUG("%d %s\n", lock->context, + ret ? "interrupted" : "has lock"); if (ret) return ret; sigemptyset(&dev->sigmask); @@ -114,24 +110,26 @@ int drm_lock(struct inode *inode, struct drm_file *file_priv, sigaddset(&dev->sigmask, SIGTSTP); sigaddset(&dev->sigmask, SIGTTIN); sigaddset(&dev->sigmask, SIGTTOU); - dev->sigdata.context = lock.context; + dev->sigdata.context = lock->context; dev->sigdata.lock = dev->lock.hw_lock; block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); - if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY)) + if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) dev->driver->dma_ready(dev); - if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { + if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) + { if (dev->driver->dma_quiescent(dev)) { - DRM_DEBUG( "%d waiting for DMA quiescent\n", lock.context); + DRM_DEBUG("%d waiting for DMA quiescent\n", + lock->context); return -EBUSY; } } if (dev->driver->kernel_context_switch && - dev->last_context != lock.context) { + dev->last_context != lock->context) { dev->driver->kernel_context_switch(dev, dev->last_context, - lock.context); + lock->context); } return 0; @@ -148,19 +146,14 @@ int drm_lock(struct inode *inode, struct drm_file *file_priv, * * Transfer and free the lock. 
*/ -int drm_unlock(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_lock lock; + struct drm_lock *lock = data; unsigned long irqflags; - if (copy_from_user(&lock, (struct drm_lock __user *) arg, sizeof(lock))) - return -EFAULT; - - if (lock.context == DRM_KERNEL_CONTEXT) { + if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", - current->pid, lock.context); + current->pid, lock->context); return -EINVAL; } @@ -182,7 +175,7 @@ int drm_unlock(struct inode *inode, struct drm_file *file_priv, if (dev->driver->kernel_context_switch_unlock) dev->driver->kernel_context_switch_unlock(dev); else { - if (drm_lock_free(&dev->lock,lock.context)) { + if (drm_lock_free(&dev->lock,lock->context)) { /* FIXME: Should really bail out here. */ } } @@ -389,9 +382,8 @@ void drm_idlelock_release(struct drm_lock_data *lock_data) EXPORT_SYMBOL(drm_idlelock_release); -int drm_i_have_hw_lock(struct drm_file *file_priv) +int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) { - DRM_DEVICE; return (file_priv->lock_count && dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && diff --git a/linux-core/drm_memory_debug.c b/linux-core/drm_memory_debug.c index c124f8f8..c196ee2b 100644 --- a/linux-core/drm_memory_debug.c +++ b/linux-core/drm_memory_debug.c @@ -291,7 +291,7 @@ void drm_free_pages(unsigned long address, int order, int area) #if __OS_HAS_AGP -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { DRM_AGP_MEM *handle; diff --git a/linux-core/drm_memory_debug.h b/linux-core/drm_memory_debug.h index 9d0dedfb..b055ac00 100644 --- a/linux-core/drm_memory_debug.h +++ b/linux-core/drm_memory_debug.h @@ -277,7 +277,7 @@ void drm_free_pages (unsigned long 
address, int order, int area) { #if __OS_HAS_AGP -DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) { +DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) { DRM_AGP_MEM *handle; if (!pages) { diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index f792dc84..e5f2b69c 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -213,15 +213,24 @@ extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, extern int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable); -extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_reference_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_flush_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_wait_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_emit_ioctl(DRM_IOCTL_ARGS); -extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS); +extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); 
/************************************************** *TTMs */ @@ -437,21 +446,21 @@ struct drm_bo_driver { * buffer objects (drm_bo.c) */ -extern int drm_bo_create_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_map_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_reference_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_info_ioctl(DRM_IOCTL_ARGS); -extern int drm_bo_op_ioctl(DRM_IOCTL_ARGS); - - -extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); -extern int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS); -extern int drm_mm_lock_ioctl(DRM_IOCTL_ARGS); -extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); + + +extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_unlock_ioctl(struct drm_device 
*dev, void *data, struct drm_file *file_priv); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 3f143833..2688479a 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -6,8 +6,6 @@ #include /* For task queue support */ #include -/** Ioctl arguments */ -#define DRM_IOCTL_ARGS struct inode *inode, struct drm_file *file_priv, unsigned int cmd, unsigned long data /** Current process ID */ #define DRM_CURRENTPID current->pid #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) @@ -48,8 +46,6 @@ #define DRM_WRITEMEMORYBARRIER() wmb() /** Read/write memory barrier */ #define DRM_MEMORYBARRIER() mb() -/** DRM device local declaration */ -#define DRM_DEVICE struct drm_device *dev = file_priv->head->dev /** IRQ handler arguments and return type and values */ #define DRM_IRQ_ARGS int irq, void *arg @@ -89,14 +85,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) #define MTRR_TYPE_WRCOMB 1 #endif -/** For data going into the kernel through the ioctl argument */ -#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \ - if ( copy_from_user(&arg1, arg2, arg3) ) \ - return -EFAULT -/** For data going from the kernel through the ioctl argument */ -#define DRM_COPY_TO_USER_IOCTL(arg1, arg2, arg3) \ - if ( copy_to_user(arg1, &arg2, arg3) ) \ - return -EFAULT /** Other copying of data to kernel space */ #define DRM_COPY_FROM_USER(arg1, arg2, arg3) \ copy_from_user(arg1, arg2, arg3) diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index 58696347..3c0f672e 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -187,49 +187,28 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) } EXPORT_SYMBOL(drm_sg_alloc); -int drm_sg_alloc_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int 
cmd, unsigned long arg) +int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_scatter_gather __user *argp = (void __user *)arg; - struct drm_scatter_gather request; - int ret; + struct drm_scatter_gather *request = data; - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - - ret = drm_sg_alloc(dev, &request); - if ( ret ) return ret; - - if (copy_to_user(argp, &request, sizeof(request))) { - drm_sg_cleanup(dev->sg); - return -EFAULT; - } - - - return 0; + return drm_sg_alloc(dev, request); } -int drm_sg_free(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +int drm_sg_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - struct drm_scatter_gather request; + struct drm_scatter_gather *request = data; struct drm_sg_mem *entry; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; - if (copy_from_user(&request, - (struct drm_scatter_gather __user *) arg, - sizeof(request))) - return -EFAULT; - entry = dev->sg; dev->sg = NULL; - if (!entry || entry->handle != request.handle) + if (!entry || entry->handle != request->handle) return -EINVAL; DRM_DEBUG("sg free virtual = %p\n", entry->virtual); diff --git a/linux-core/ffb_context.c b/linux-core/ffb_context.c index e6ae60c3..586c3503 100644 --- a/linux-core/ffb_context.c +++ b/linux-core/ffb_context.c @@ -13,7 +13,7 @@ #include "drmP.h" #include "ffb_drv.h" -static int ffb_alloc_queue(drm_device_t * dev, int is_2d_only) { +static int ffb_alloc_queue(struct drm_device * dev, int is_2d_only) { ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; int i; @@ -351,7 +351,7 @@ static void FFBWait(ffb_fbcPtr ffb) } while (--limit); } -int ffb_context_switch(drm_device_t * dev, int old, int new) { +int ffb_context_switch(struct drm_device * dev, int old, int new) { ffb_dev_priv_t *fpriv = 
(ffb_dev_priv_t *) dev->dev_private; #if DRM_DMA_HISTOGRAM @@ -401,7 +401,7 @@ int ffb_resctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; drm_ctx_t ctx; int idx; @@ -421,7 +421,7 @@ int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; struct ffb_hw_context *hwctx; drm_ctx_t ctx; @@ -449,7 +449,7 @@ int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; struct ffb_hw_context *hwctx; drm_ctx_t ctx; @@ -480,7 +480,7 @@ int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_switchctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; drm_ctx_t ctx; if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) @@ -504,7 +504,7 @@ int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_ctx_t ctx; drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; int idx; @@ -523,7 +523,7 @@ int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd, return 0; } 
-static void ffb_driver_reclaim_buffers_locked(drm_device_t * dev) +static void ffb_driver_reclaim_buffers_locked(struct drm_device * dev) { ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock); @@ -537,13 +537,13 @@ static void ffb_driver_reclaim_buffers_locked(drm_device_t * dev) } } -static void ffb_driver_lastclose(drm_device_t * dev) +static void ffb_driver_lastclose(struct drm_device * dev) { if (dev->dev_private) kfree(dev->dev_private); } -static void ffb_driver_unload(drm_device_t * dev) +static void ffb_driver_unload(struct drm_device * dev) { if (ffb_position != NULL) kfree(ffb_position); @@ -571,7 +571,7 @@ unsigned long ffb_driver_get_map_ofs(drm_map_t * map) return (map->offset & 0xffffffff); } -unsigned long ffb_driver_get_reg_ofs(drm_device_t * dev) +unsigned long ffb_driver_get_reg_ofs(struct drm_device * dev) { ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private; diff --git a/linux-core/ffb_drv.c b/linux-core/ffb_drv.c index 9c88f061..f2b4cc7f 100644 --- a/linux-core/ffb_drv.c +++ b/linux-core/ffb_drv.c @@ -114,7 +114,7 @@ static void ffb_apply_upa_parent_ranges(int parent, return; } -static int ffb_init_one(drm_device_t *dev, int prom_node, int parent_node, +static int ffb_init_one(struct drm_device *dev, int prom_node, int parent_node, int instance) { struct linux_prom64_registers regs[2*PROMREG_MAX]; @@ -167,7 +167,7 @@ static int __init ffb_scan_siblings(int root, int instance) static drm_map_t *ffb_find_map(struct file *filp, unsigned long off) { drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_device *dev; drm_map_list_t *r_list; struct list_head *list; drm_map_t *map; @@ -237,10 +237,10 @@ unsigned long ffb_get_unmapped_area(struct file *filp, /* This functions must be here since it references drm_numdevs) * which drm_drv.h declares. 
*/ -static int ffb_driver_firstopen(drm_device_t *dev) +static int ffb_driver_firstopen(struct drm_device *dev) { ffb_dev_priv_t *ffb_priv; - drm_device_t *temp_dev; + struct drm_device *temp_dev; int ret = 0; int i; diff --git a/linux-core/ffb_drv.h b/linux-core/ffb_drv.h index f76b0d92..bad3c94d 100644 --- a/linux-core/ffb_drv.h +++ b/linux-core/ffb_drv.h @@ -281,4 +281,4 @@ extern unsigned long ffb_get_unmapped_area(struct file *filp, unsigned long pgoff, unsigned long flags); extern unsigned long ffb_driver_get_map_ofs(drm_map_t *map) -extern unsigned long ffb_driver_get_reg_ofs(drm_device_t *dev) +extern unsigned long ffb_driver_get_reg_ofs(struct drm_device *dev) diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 1e74d792..7c37b4bb 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -448,98 +448,29 @@ static int i810_dma_initialize(struct drm_device * dev, return 0; } -/* i810 DRM version 1.1 used a smaller init structure with different - * ordering of values than is currently used (drm >= 1.2). There is - * no defined way to detect the XFree version to correct this problem, - * however by checking using this procedure we can detect the correct - * thing to do. - * - * #1 Read the Smaller init structure from user-space - * #2 Verify the overlay_physical is a valid physical address, or NULL - * If it isn't then we have a v1.1 client. Fix up params. - * If it is, then we have a 1.2 client... get the rest of the data. 
- */ -static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) +static int i810_dma_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - - /* Get v1.1 init data */ - if (copy_from_user(init, (drm_i810_pre12_init_t __user *) arg, - sizeof(drm_i810_pre12_init_t))) { - return -EFAULT; - } - - if ((!init->overlay_physical) || (init->overlay_physical > 4096)) { - - /* This is a v1.2 client, just get the v1.2 init data */ - DRM_INFO("Using POST v1.2 init.\n"); - if (copy_from_user(init, (drm_i810_init_t __user *) arg, - sizeof(drm_i810_init_t))) { - return -EFAULT; - } - } else { - - /* This is a v1.1 client, fix the params */ - DRM_INFO("Using PRE v1.2 init.\n"); - init->pitch_bits = init->h; - init->pitch = init->w; - init->h = init->overlay_physical; - init->w = init->overlay_offset; - init->overlay_physical = 0; - init->overlay_offset = 0; - } - - return 0; -} - -static int i810_dma_init(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) -{ - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv; - drm_i810_init_t init; + drm_i810_init_t *init = data; int retcode = 0; - /* Get only the init func */ - if (copy_from_user - (&init, (void __user *)arg, sizeof(drm_i810_init_func_t))) - return -EFAULT; - - switch (init.func) { - case I810_INIT_DMA: - /* This case is for backward compatibility. It - * handles XFree 4.1.0 and 4.2.0, and has to - * do some parameter checking as described below. - * It will someday go away. 
- */ - retcode = i810_dma_init_compat(&init, arg); - if (retcode) - return retcode; - - dev_priv = drm_alloc(sizeof(drm_i810_private_t), - DRM_MEM_DRIVER); - if (dev_priv == NULL) - return -ENOMEM; - retcode = i810_dma_initialize(dev, dev_priv, &init); - break; - - default: + switch (init->func) { case I810_INIT_DMA_1_4: DRM_INFO("Using v1.4 init.\n"); - if (copy_from_user(&init, (drm_i810_init_t __user *) arg, - sizeof(drm_i810_init_t))) { - return -EFAULT; - } dev_priv = drm_alloc(sizeof(drm_i810_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) return -ENOMEM; - retcode = i810_dma_initialize(dev, dev_priv, &init); + retcode = i810_dma_initialize(dev, dev_priv, init); break; case I810_CLEANUP_DMA: DRM_INFO("DMA Cleanup\n"); retcode = i810_dma_cleanup(dev); break; + default: + return -EINVAL; } return retcode; @@ -1016,45 +947,38 @@ static void i810_reclaim_buffers(struct drm_device *dev, } } -static int i810_flush_ioctl(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_flush_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - LOCK_TEST_WITH_RETURN(dev, file_priv); i810_flush_queue(dev); return 0; } -static int i810_dma_vertex(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_dma_vertex(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - drm_i810_vertex_t vertex; - - if (copy_from_user - (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex))) - return -EFAULT; + drm_i810_vertex_t *vertex = data; LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_DEBUG("i810 dma vertex, idx %d used %d discard 
%d\n", - vertex.idx, vertex.used, vertex.discard); + vertex->idx, vertex->used, vertex->discard); - if (vertex.idx < 0 || vertex.idx > dma->buf_count) + if (vertex->idx < 0 || vertex->idx > dma->buf_count) return -EINVAL; i810_dma_dispatch_vertex(dev, - dma->buflist[vertex.idx], - vertex.discard, vertex.used); + dma->buflist[vertex->idx], + vertex->discard, vertex->used); - atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); atomic_inc(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1062,15 +986,10 @@ static int i810_dma_vertex(struct inode *inode, struct drm_file *file_priv, return 0; } -static int i810_clear_bufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_clear_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - drm_i810_clear_t clear; - - if (copy_from_user - (&clear, (drm_i810_clear_t __user *) arg, sizeof(clear))) - return -EFAULT; + drm_i810_clear_t *clear = data; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1079,16 +998,14 @@ static int i810_clear_bufs(struct inode *inode, struct drm_file *file_priv, return -EINVAL; } - i810_dma_dispatch_clear(dev, clear.flags, - clear.clear_color, clear.clear_depth); + i810_dma_dispatch_clear(dev, clear->flags, + clear->clear_color, clear->clear_depth); return 0; } -static int i810_swap_bufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_swap_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; - DRM_DEBUG("i810_swap_bufs\n"); LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1097,11 +1014,9 @@ static int i810_swap_bufs(struct inode *inode, struct drm_file *file_priv, return 0; } -static int i810_getage(struct inode *inode, 
struct drm_file *file_priv, - unsigned int cmd, - unsigned long arg) +static int i810_getage(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1111,45 +1026,39 @@ static int i810_getage(struct inode *inode, struct drm_file *file_priv, return 0; } -static int i810_getbuf(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_getbuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; int retcode = 0; - drm_i810_dma_t d; + drm_i810_dma_t *d = data; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d))) - return -EFAULT; - LOCK_TEST_WITH_RETURN(dev, file_priv); - d.granted = 0; + d->granted = 0; - retcode = i810_dma_get_buffer(dev, &d, file_priv); + retcode = i810_dma_get_buffer(dev, d, file_priv); DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", - current->pid, retcode, d.granted); + current->pid, retcode, d->granted); - if (copy_to_user((void __user *) arg, &d, sizeof(d))) - return -EFAULT; sarea_priv->last_dispatch = (int)hw_status[5]; return retcode; } -static int i810_copybuf(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_copybuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) { /* Never copy - 2.4.x doesn't need it */ return 0; } -static int i810_docopy(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_docopy(struct drm_device *dev, void *data, + struct drm_file *file_priv) { /* Never 
copy - 2.4.x doesn't need it */ return 0; @@ -1215,29 +1124,25 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, ADVANCE_LP_RING(); } -static int i810_dma_mc(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_dma_mc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - drm_i810_mc_t mc; - - if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc))) - return -EFAULT; + drm_i810_mc_t *mc = data; LOCK_TEST_WITH_RETURN(dev, file_priv); - if (mc.idx >= dma->buf_count || mc.idx < 0) + if (mc->idx >= dma->buf_count || mc->idx < 0) return -EINVAL; - i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used, - mc.last_render); + i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, + mc->last_render); - atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); atomic_inc(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1245,44 +1150,38 @@ static int i810_dma_mc(struct inode *inode, struct drm_file *file_priv, return 0; } -static int i810_rstatus(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_rstatus(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; return (int)(((u32 *) (dev_priv->hw_status_page))[4]); } -static int i810_ov0_info(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_ov0_info(struct drm_device 
*dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - drm_i810_overlay_t data; + drm_i810_overlay_t *ov = data; + + ov->offset = dev_priv->overlay_offset; + ov->physical = dev_priv->overlay_physical; - data.offset = dev_priv->overlay_offset; - data.physical = dev_priv->overlay_physical; - if (copy_to_user - ((drm_i810_overlay_t __user *) arg, &data, sizeof(data))) - return -EFAULT; return 0; } -static int i810_fstatus(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_fstatus(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, file_priv); return I810_READ(0x30008); } -static int i810_ov0_flip(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_ov0_flip(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1316,10 +1215,9 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev) return 0; } -static int i810_flip_bufs(struct inode *inode, struct drm_file *file_priv, - unsigned int cmd, unsigned long arg) +static int i810_flip_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - struct drm_device *dev = file_priv->head->dev; drm_i810_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); @@ -1373,21 +1271,21 @@ int i810_driver_dma_quiescent(struct drm_device * dev) } struct drm_ioctl_desc i810_ioctls[] = { - [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, - 
[DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH} + DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH) }; int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h index db59550d..eff61b4d 100644 --- a/linux-core/i810_drm.h +++ b/linux-core/i810_drm.h @@ -124,29 +124,6 @@ typedef struct 
_drm_i810_init { unsigned int pitch_bits; } drm_i810_init_t; -/* This is the init structure prior to v1.2 */ -typedef struct _drm_i810_pre12_init { - drm_i810_init_func_t func; -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) - int ring_map_idx; - int buffer_map_idx; -#else - unsigned int mmio_offset; - unsigned int buffers_offset; -#endif - int sarea_priv_offset; - unsigned int ring_start; - unsigned int ring_end; - unsigned int ring_size; - unsigned int front_offset; - unsigned int back_offset; - unsigned int depth_offset; - unsigned int w; - unsigned int h; - unsigned int pitch; - unsigned int pitch_bits; -} drm_i810_pre12_init_t; - /* Warning: If you change the SAREA structure you must change the Xserver * structure as well */ diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 0e9ed65d..7e162a8e 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -81,15 +81,12 @@ unsigned long sis_sman_mm_offset(void *private, void *ref) #endif -static int sis_fb_init(DRM_IOCTL_ARGS) +static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_fb_t fb; + drm_sis_fb_t *fb = data; int ret; - DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb)); - mutex_lock(&dev->struct_mutex); #if defined(__linux__) && defined(CONFIG_FB_SIS) { @@ -104,7 +101,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) } #else ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0, - fb.size >> SIS_MM_ALIGN_SHIFT); + fb->size >> SIS_MM_ALIGN_SHIFT); #endif if (ret) { @@ -114,25 +111,22 @@ static int sis_fb_init(DRM_IOCTL_ARGS) } dev_priv->vram_initialized = 1; - dev_priv->vram_offset = fb.offset; + dev_priv->vram_offset = fb->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); + DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); return 0; } static int sis_drm_alloc(struct drm_device * dev, struct drm_file 
*file_priv, - unsigned long data, int pool) + void *data, int pool) { drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data; - drm_sis_mem_t mem; + drm_sis_mem_t *mem = data; int retval = 0; struct drm_memblock_item *item; - DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem)); - mutex_lock(&dev->struct_mutex); if (0 == ((pool == 0) ? dev_priv->vram_initialized : @@ -142,70 +136,62 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv, return -EINVAL; } - mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; - item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0, + mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; + item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { - mem.offset = ((pool == 0) ? + mem->offset = ((pool == 0) ? dev_priv->vram_offset : dev_priv->agp_offset) + (item->mm-> offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT); - mem.free = item->user_hash.key; - mem.size = mem.size << SIS_MM_ALIGN_SHIFT; + mem->free = item->user_hash.key; + mem->size = mem->size << SIS_MM_ALIGN_SHIFT; } else { - mem.offset = 0; - mem.size = 0; - mem.free = 0; + mem->offset = 0; + mem->size = 0; + mem->free = 0; retval = -ENOMEM; } - DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); - - DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem.size, - mem.offset); + DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size, + mem->offset); return retval; } -static int sis_drm_free(DRM_IOCTL_ARGS) +static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t mem; + drm_sis_mem_t *mem = data; int ret; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_sis_mem_t __user *) data, - sizeof(mem)); - mutex_lock(&dev->struct_mutex); - ret = drm_sman_free_key(&dev_priv->sman, mem.free); + ret = 
drm_sman_free_key(&dev_priv->sman, mem->free); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("free = 0x%lx\n", mem.free); + DRM_DEBUG("free = 0x%lx\n", mem->free); return ret; } -static int sis_fb_alloc(DRM_IOCTL_ARGS) +static int sis_fb_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE); } -static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) +static int sis_ioctl_agp_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_agp_t agp; + drm_sis_agp_t *agp = data; int ret; dev_priv = dev->dev_private; - DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data, - sizeof(agp)); mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0, - agp.size >> SIS_MM_ALIGN_SHIFT); + agp->size >> SIS_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("AGP memory manager initialisation error\n"); @@ -214,16 +200,16 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) } dev_priv->agp_initialized = 1; - dev_priv->agp_offset = agp.offset; + dev_priv->agp_offset = agp->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); + DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); return 0; } -static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) +static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; return sis_drm_alloc(dev, file_priv, data, AGP_TYPE); } @@ -335,14 +321,12 @@ void sis_reclaim_buffers_locked(struct drm_device * dev, } struct drm_ioctl_desc sis_ioctls[] = { - [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = - {sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] 
= {sis_drm_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = - {sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY} + DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 10289a89..d44c26f4 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -792,18 +792,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) */ int -via_dma_blit_sync( DRM_IOCTL_ARGS ) +via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) { - drm_via_blitsync_t sync; + drm_via_blitsync_t *sync = data; int err; - DRM_DEVICE; - DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); - - if (sync.engine >= VIA_NUM_BLIT_ENGINES) + if (sync->engine >= VIA_NUM_BLIT_ENGINES) return -EINVAL; - err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); + err = via_dmablit_sync(dev, sync->sync_handle, sync->engine); if (-EINTR == err) err = -EAGAIN; @@ -819,17 +816,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS ) */ int -via_dma_blit( DRM_IOCTL_ARGS ) +via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) { - drm_via_dmablit_t xfer; + drm_via_dmablit_t *xfer = data; int err; - DRM_DEVICE; - - DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer)); - - err = via_dmablit(dev, &xfer); - DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer)); + err = via_dmablit(dev, xfer); return err; } diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index 411c3d52..35ca6bfc 100644 --- a/linux-core/via_mm.c 
+++ b/linux-core/via_mm.c @@ -33,18 +33,15 @@ #define VIA_MM_ALIGN_SHIFT 4 #define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1) -int via_agp_init(DRM_IOCTL_ARGS) +int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_agp_t agp; + drm_via_agp_t *agp = data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; int ret; - DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data, - sizeof(agp)); mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0, - agp.size >> VIA_MM_ALIGN_SHIFT); + agp->size >> VIA_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("AGP memory manager initialisation error\n"); @@ -53,25 +50,22 @@ int via_agp_init(DRM_IOCTL_ARGS) } dev_priv->agp_initialized = 1; - dev_priv->agp_offset = agp.offset; + dev_priv->agp_offset = agp->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); + DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); return 0; } -int via_fb_init(DRM_IOCTL_ARGS) +int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_fb_t fb; + drm_via_fb_t *fb = data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; int ret; - DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb)); - mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0, - fb.size >> VIA_MM_ALIGN_SHIFT); + fb->size >> VIA_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("VRAM memory manager initialisation error\n"); @@ -80,10 +74,10 @@ int via_fb_init(DRM_IOCTL_ARGS) } dev_priv->vram_initialized = 1; - dev_priv->vram_offset = fb.offset; + dev_priv->vram_offset = fb->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); + DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); return 0; @@ -123,25 +117,21 @@ void via_lastclose(struct drm_device *dev) 
mutex_unlock(&dev->struct_mutex); } -int via_mem_alloc(DRM_IOCTL_ARGS) +int via_mem_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; - - drm_via_mem_t mem; + drm_via_mem_t *mem = data; int retval = 0; struct drm_memblock_item *item; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned long tmpSize; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, - sizeof(mem)); - - if (mem.type > VIA_MEM_AGP) { + if (mem->type > VIA_MEM_AGP) { DRM_ERROR("Unknown memory type allocation\n"); return -EINVAL; } mutex_lock(&dev->struct_mutex); - if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : + if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : dev_priv->agp_initialized)) { DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); @@ -149,42 +139,37 @@ int via_mem_alloc(DRM_IOCTL_ARGS) return -EINVAL; } - tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; - item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0, + tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; + item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { - mem.offset = ((mem.type == VIA_MEM_VIDEO) ? + mem->offset = ((mem->type == VIA_MEM_VIDEO) ? 
dev_priv->vram_offset : dev_priv->agp_offset) + (item->mm-> offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT); - mem.index = item->user_hash.key; + mem->index = item->user_hash.key; } else { - mem.offset = 0; - mem.size = 0; - mem.index = 0; + mem->offset = 0; + mem->size = 0; + mem->index = 0; DRM_DEBUG("Video memory allocation failed\n"); retval = -ENOMEM; } - DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem)); return retval; } -int via_mem_free(DRM_IOCTL_ARGS) +int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_via_private_t *dev_priv = dev->dev_private; - drm_via_mem_t mem; + drm_via_mem_t *mem = data; int ret; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, - sizeof(mem)); - mutex_lock(&dev->struct_mutex); - ret = drm_sman_free_key(&dev_priv->sman, mem.index); + ret = drm_sman_free_key(&dev_priv->sman, mem->index); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("free = 0x%lx\n", mem.index); + DRM_DEBUG("free = 0x%lx\n", mem->index); return ret; } -- cgit v1.2.3 From f68ad6d1abdce7d3c11cc2e90745c0d1e565fe77 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sat, 21 Jul 2007 21:50:25 +1000 Subject: fix drm no-compile due to BSD :-) --- linux-core/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 85e3ba47..dba0a7ba 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -599,7 +599,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) ioctl = &drm_ioctls[nr]; else { - errno = -EINVAL; + retcode = -EINVAL; goto err_i1; } -- cgit v1.2.3 From b43b0b2b32a31bcb81042659ffcc95b8975e42cf Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sat, 21 Jul 2007 22:11:41 +1000 Subject: fix missing brace placement for IOC_IN --- linux-core/drm_drv.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 
'linux-core') diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index dba0a7ba..8a675790 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -615,9 +615,10 @@ int drm_ioctl(struct inode *inode, struct file *filp, if (cmd & IOC_IN) { if (copy_from_user(kdata, (void __user *)arg, - _IOC_SIZE(cmd)) != 0) + _IOC_SIZE(cmd)) != 0) { retcode = -EACCES; - goto err_i1; + goto err_i1; + } } if (!func) { -- cgit v1.2.3 From 0844c46759b96d52c4952fceb96f7c6bb74b2ce7 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sat, 21 Jul 2007 23:13:25 +0300 Subject: Fix misc ioctl issues, makes Nouveau run. Debug print fix in drm_release(). Forgotten local variable init in drm_setversion(). Unnecessary put_user() in drm_addmap_ioctl(). ioctl->cmd check broken in drm_ioctl(); workaround. --- linux-core/drm_bufs.c | 3 +-- linux-core/drm_drv.c | 10 ++++++++-- linux-core/drm_fops.c | 2 +- linux-core/drm_ioctl.c | 2 +- 4 files changed, 11 insertions(+), 6 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index e8864df0..665bc65d 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -343,8 +343,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data, return err; /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ - if (put_user((void *)(unsigned long)maplist->user_token, &map->handle)) - return -EFAULT; + map->handle = (void *)(unsigned long)maplist->user_token; return 0; } diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 8a675790..cc676bda 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -602,12 +602,18 @@ int drm_ioctl(struct inode *inode, struct file *filp, retcode = -EINVAL; goto err_i1; } - +#if 0 + /* + * This check is disabled, because driver private ioctl->cmd + * are not the ioctl commands with size and direction bits but + * just the indices. The DRM core ioctl->cmd are the proper ioctl + * commands. 
The drivers' ioctl tables need to be fixed. + */ if (ioctl->cmd != cmd) { retcode = -EINVAL; goto err_i1; } - +#endif func = ioctl->func; /* is there a local override? */ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 251ee5b5..ab5f4ca5 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -403,7 +403,7 @@ int drm_release(struct inode *inode, struct file *filp) */ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", - current->pid, (long)old_encode_dev(dev), + current->pid, (long)old_encode_dev(file_priv->head->device), dev->open_count); if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 717e23c0..f3f757da 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -301,7 +301,7 @@ int drm_getstats(struct drm_device *dev, void *data, int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_set_version *sv = data; - int if_version, retcode; + int if_version, retcode = 0; if (sv->drm_di_major != -1) { if (sv->drm_di_major != DRM_IF_MAJOR || -- cgit v1.2.3 From 5d6fdd9d7924fde8ce62631e6bdce8d5fe33fc3d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 20:34:56 -0700 Subject: Clean up xgi_cmd_info and associated code. There were numerous unnecessary fields in xgi_cmd_info. The remaining fields had pretty crummy names. Cut out the cruft, and rename the rest. As a result, the unused parameter "triggerCounter" to triggerHWCommandList can be removed. 
--- linux-core/xgi_cmdlist.c | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index b93541f3..5c31fa27 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -35,8 +35,7 @@ struct xgi_cmdring_info s_cmdring; static void addFlush2D(struct xgi_info * info); static unsigned int get_batch_command(enum xgi_batch_type type); -static void triggerHWCommandList(struct xgi_info * info, - unsigned int triggerCounter); +static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(void); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) @@ -83,14 +82,14 @@ unsigned int get_batch_command(enum xgi_batch_type type) static void xgi_submit_cmdlist(struct xgi_info * info, const struct xgi_cmd_info * pCmdInfo) { - const unsigned int cmd = get_batch_command(pCmdInfo->_firstBeginType); + const unsigned int cmd = get_batch_command(pCmdInfo->type); u32 begin[4]; - begin[0] = (cmd << 24) | (BEGIN_VALID_MASK) | - (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->_curDebugID); - begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->_firstSize; - begin[2] = pCmdInfo->_firstBeginAddr >> 4; + begin[0] = (cmd << 24) | BEGIN_VALID_MASK + | (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->id); + begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size; + begin[2] = pCmdInfo->hw_addr >> 4; begin[3] = 0; if (s_cmdring._lastBatchStartAddr == 0) { @@ -127,7 +126,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, DRM_INFO("s_cmdring._lastBatchStartAddr != 0\n"); - if (pCmdInfo->_firstBeginType == BTYPE_3D) { + if (pCmdInfo->type == BTYPE_3D) { addFlush2D(info); } @@ -141,10 +140,10 @@ static void xgi_submit_cmdlist(struct xgi_info * info, wmb(); lastBatchVirtAddr[0] = begin[0]; - triggerHWCommandList(info, pCmdInfo->_beginCount); + triggerHWCommandList(info); } - s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; + 
s_cmdring._lastBatchStartAddr = pCmdInfo->hw_addr; DRM_INFO("%s: exit\n", __func__); } @@ -159,7 +158,7 @@ int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) (struct xgi_cmd_info __user *) data, sizeof(cmd_list)); - if (cmd_list._firstBeginType > BTYPE_CTRL) { + if (cmd_list.type > BTYPE_CTRL) { return DRM_ERR(EINVAL); } @@ -234,18 +233,13 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) } } -static void triggerHWCommandList(struct xgi_info * info, - unsigned int triggerCounter) +static void triggerHWCommandList(struct xgi_info * info) { static unsigned int s_triggerID = 1; - //Fix me, currently we just trigger one time - while (triggerCounter--) { - dwWriteReg(info->mmio_map, - BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, - 0x05000000 + (0x0ffff & s_triggerID++)); - // xgi_waitfor_pci_idle(info); - } + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, + 0x05000000 + (0x0ffff & s_triggerID++)); } @@ -284,7 +278,7 @@ static void addFlush2D(struct xgi_info * info) lastBatchVirtAddr[0] = (get_batch_command(BTYPE_CTRL) << 24) | (BEGIN_VALID_MASK); - triggerHWCommandList(info, 1); + triggerHWCommandList(info); s_cmdring._cmdRingOffset += 0x20; s_cmdring._lastBatchStartAddr = flushBatchHWAddr; -- cgit v1.2.3 From 3265a61f895a1d35072984e9cdc71aad898647fa Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 20:39:22 -0700 Subject: Make s_cmdring a field in the xgi_info structure instead of a global. 
--- linux-core/xgi_cmdlist.c | 54 +++++++++++++++++++++++------------------------- linux-core/xgi_cmdlist.h | 1 + linux-core/xgi_drv.h | 3 +++ 3 files changed, 30 insertions(+), 28 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 5c31fa27..a040fa15 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -31,12 +31,10 @@ #include "xgi_misc.h" #include "xgi_cmdlist.h" -struct xgi_cmdring_info s_cmdring; - static void addFlush2D(struct xgi_info * info); static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); -static void xgi_cmdlist_reset(void); +static void xgi_cmdlist_reset(struct xgi_info * info); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { @@ -51,11 +49,11 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return err; } - s_cmdring._cmdRingSize = mem_alloc.size; - s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; - s_cmdring._cmdRingAllocOffset = mem_alloc.offset; - s_cmdring._lastBatchStartAddr = 0; - s_cmdring._cmdRingOffset = 0; + info->cmdring._cmdRingSize = mem_alloc.size; + info->cmdring._cmdRingBuffer = mem_alloc.hw_addr; + info->cmdring._cmdRingAllocOffset = mem_alloc.offset; + info->cmdring._lastBatchStartAddr = 0; + info->cmdring._cmdRingOffset = 0; return 0; } @@ -92,7 +90,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, begin[2] = pCmdInfo->hw_addr >> 4; begin[3] = 0; - if (s_cmdring._lastBatchStartAddr == 0) { + if (info->cmdring._lastBatchStartAddr == 0) { const unsigned int portOffset = BASE_3D_ENG + (cmd << 2); @@ -124,7 +122,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, } else { u32 *lastBatchVirtAddr; - DRM_INFO("s_cmdring._lastBatchStartAddr != 0\n"); + DRM_INFO("info->cmdring._lastBatchStartAddr != 0\n"); if (pCmdInfo->type == BTYPE_3D) { addFlush2D(info); @@ -132,7 +130,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, 
lastBatchVirtAddr = xgi_find_pcie_virt(info, - s_cmdring._lastBatchStartAddr); + info->cmdring._lastBatchStartAddr); lastBatchVirtAddr[1] = begin[1]; lastBatchVirtAddr[2] = begin[2]; @@ -143,7 +141,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, triggerHWCommandList(info); } - s_cmdring._lastBatchStartAddr = pCmdInfo->hw_addr; + info->cmdring._lastBatchStartAddr = pCmdInfo->hw_addr; DRM_INFO("%s: exit\n", __func__); } @@ -188,7 +186,7 @@ int xgi_state_change(struct xgi_info * info, unsigned int to, // stop to received batch } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) { DRM_INFO("[kd] I see, now is to enterVT\n"); - xgi_cmdlist_reset(); + xgi_cmdlist_reset(info); } else if ((from == STATE_GRAPHIC) && ((to == STATE_LOGOUT) || (to == STATE_REBOOT) @@ -217,19 +215,19 @@ int xgi_state_change_ioctl(DRM_IOCTL_ARGS) } -void xgi_cmdlist_reset(void) +void xgi_cmdlist_reset(struct xgi_info * info) { - s_cmdring._lastBatchStartAddr = 0; - s_cmdring._cmdRingOffset = 0; + info->cmdring._lastBatchStartAddr = 0; + info->cmdring._cmdRingOffset = 0; } void xgi_cmdlist_cleanup(struct xgi_info * info) { - if (s_cmdring._cmdRingBuffer != 0) { - xgi_pcie_free(info, s_cmdring._cmdRingAllocOffset, NULL); - s_cmdring._cmdRingBuffer = 0; - s_cmdring._cmdRingOffset = 0; - s_cmdring._cmdRingSize = 0; + if (info->cmdring._cmdRingBuffer != 0) { + xgi_pcie_free(info, info->cmdring._cmdRingAllocOffset, NULL); + info->cmdring._cmdRingBuffer = 0; + info->cmdring._cmdRingOffset = 0; + info->cmdring._cmdRingSize = 0; } } @@ -250,11 +248,11 @@ static void addFlush2D(struct xgi_info * info) u32 *lastBatchVirtAddr; /* check buf is large enough to contain a new flush batch */ - if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) { - s_cmdring._cmdRingOffset = 0; + if ((info->cmdring._cmdRingOffset + 0x20) >= info->cmdring._cmdRingSize) { + info->cmdring._cmdRingOffset = 0; } - flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; + 
flushBatchHWAddr = info->cmdring._cmdRingBuffer + info->cmdring._cmdRingOffset; flushBatchVirtAddr = xgi_find_pcie_virt(info, flushBatchHWAddr); /* not using memcpy for I assume the address is discrete */ @@ -267,9 +265,9 @@ static void addFlush2D(struct xgi_info * info) *(flushBatchVirtAddr + 6) = FLUSH_2D; *(flushBatchVirtAddr + 7) = FLUSH_2D; - // ASSERT(s_cmdring._lastBatchStartAddr != NULL); + // ASSERT(info->cmdring._lastBatchStartAddr != NULL); lastBatchVirtAddr = - xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + xgi_find_pcie_virt(info, info->cmdring._lastBatchStartAddr); lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; @@ -280,6 +278,6 @@ static void addFlush2D(struct xgi_info * info) triggerHWCommandList(info); - s_cmdring._cmdRingOffset += 0x20; - s_cmdring._lastBatchStartAddr = flushBatchHWAddr; + info->cmdring._cmdRingOffset += 0x20; + info->cmdring._lastBatchStartAddr = flushBatchHWAddr; } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 08029386..7f2c54ac 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -66,6 +66,7 @@ struct xgi_cmdring_info { u32 _cmdRingOffset; }; +struct xgi_info; extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); extern int xgi_state_change(struct xgi_info * info, unsigned int to, diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 20965876..0aab04d8 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -42,6 +42,7 @@ #define DRIVER_MINOR 8 #define DRIVER_PATCHLEVEL 0 +#include "xgi_cmdlist.h" #include "xgi_drm.h" struct xgi_aperture { @@ -90,6 +91,8 @@ struct xgi_info { struct semaphore fb_sem; struct semaphore pcie_sem; + + struct xgi_cmdring_info cmdring; }; enum PcieOwner { -- cgit v1.2.3 From 1a0775760c0eecbb238f0e928b185c267c1c3783 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 21:35:06 -0700 Subject: Rename and document fields of xgi_cmdring_info. 
--- linux-core/xgi_cmdlist.c | 71 +++++++++++++++++++++--------------------------- linux-core/xgi_cmdlist.h | 33 ++++++++++++++++++---- 2 files changed, 59 insertions(+), 45 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index a040fa15..53bada50 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -49,11 +49,12 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return err; } - info->cmdring._cmdRingSize = mem_alloc.size; - info->cmdring._cmdRingBuffer = mem_alloc.hw_addr; - info->cmdring._cmdRingAllocOffset = mem_alloc.offset; - info->cmdring._lastBatchStartAddr = 0; - info->cmdring._cmdRingOffset = 0; + info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.offset); + info->cmdring.size = mem_alloc.size; + info->cmdring.ring_hw_base = mem_alloc.hw_addr; + info->cmdring.ring_gart_base = mem_alloc.offset; + info->cmdring.last_ptr = NULL; + info->cmdring.ring_offset = 0; return 0; } @@ -90,7 +91,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, begin[2] = pCmdInfo->hw_addr >> 4; begin[3] = 0; - if (info->cmdring._lastBatchStartAddr == 0) { + if (info->cmdring.last_ptr == NULL) { const unsigned int portOffset = BASE_3D_ENG + (cmd << 2); @@ -120,28 +121,22 @@ static void xgi_submit_cmdlist(struct xgi_info * info, dwWriteReg(info->mmio_map, portOffset + 8, begin[2]); dwWriteReg(info->mmio_map, portOffset + 12, begin[3]); } else { - u32 *lastBatchVirtAddr; - - DRM_INFO("info->cmdring._lastBatchStartAddr != 0\n"); + DRM_INFO("info->cmdring.last_ptr != NULL\n"); if (pCmdInfo->type == BTYPE_3D) { addFlush2D(info); } - lastBatchVirtAddr = - xgi_find_pcie_virt(info, - info->cmdring._lastBatchStartAddr); - - lastBatchVirtAddr[1] = begin[1]; - lastBatchVirtAddr[2] = begin[2]; - lastBatchVirtAddr[3] = begin[3]; + info->cmdring.last_ptr[1] = begin[1]; + info->cmdring.last_ptr[2] = begin[2]; + info->cmdring.last_ptr[3] = begin[3]; wmb(); - lastBatchVirtAddr[0] = begin[0]; + 
info->cmdring.last_ptr[0] = begin[0]; triggerHWCommandList(info); } - info->cmdring._lastBatchStartAddr = pCmdInfo->hw_addr; + info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); DRM_INFO("%s: exit\n", __func__); } @@ -217,17 +212,17 @@ int xgi_state_change_ioctl(DRM_IOCTL_ARGS) void xgi_cmdlist_reset(struct xgi_info * info) { - info->cmdring._lastBatchStartAddr = 0; - info->cmdring._cmdRingOffset = 0; + info->cmdring.last_ptr = NULL; + info->cmdring.ring_offset = 0; } void xgi_cmdlist_cleanup(struct xgi_info * info) { - if (info->cmdring._cmdRingBuffer != 0) { - xgi_pcie_free(info, info->cmdring._cmdRingAllocOffset, NULL); - info->cmdring._cmdRingBuffer = 0; - info->cmdring._cmdRingOffset = 0; - info->cmdring._cmdRingSize = 0; + if (info->cmdring.ring_hw_base != 0) { + xgi_pcie_free(info, info->cmdring.ring_gart_base, NULL); + info->cmdring.ring_hw_base = 0; + info->cmdring.ring_offset = 0; + info->cmdring.size = 0; } } @@ -245,15 +240,15 @@ static void addFlush2D(struct xgi_info * info) { u32 *flushBatchVirtAddr; u32 flushBatchHWAddr; - u32 *lastBatchVirtAddr; /* check buf is large enough to contain a new flush batch */ - if ((info->cmdring._cmdRingOffset + 0x20) >= info->cmdring._cmdRingSize) { - info->cmdring._cmdRingOffset = 0; + if ((info->cmdring.ring_offset + 0x20) >= info->cmdring.size) { + info->cmdring.ring_offset = 0; } - flushBatchHWAddr = info->cmdring._cmdRingBuffer + info->cmdring._cmdRingOffset; - flushBatchVirtAddr = xgi_find_pcie_virt(info, flushBatchHWAddr); + flushBatchHWAddr = info->cmdring.ring_hw_base + info->cmdring.ring_offset; + flushBatchVirtAddr = info->cmdring.ptr + + (info->cmdring.ring_offset / 4); /* not using memcpy for I assume the address is discrete */ *(flushBatchVirtAddr + 0) = 0x10000000; @@ -265,19 +260,15 @@ static void addFlush2D(struct xgi_info * info) *(flushBatchVirtAddr + 6) = FLUSH_2D; *(flushBatchVirtAddr + 7) = FLUSH_2D; - // ASSERT(info->cmdring._lastBatchStartAddr != NULL); - lastBatchVirtAddr = 
- xgi_find_pcie_virt(info, info->cmdring._lastBatchStartAddr); - - lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; - lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; - lastBatchVirtAddr[3] = 0; + info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; + info->cmdring.last_ptr[2] = flushBatchHWAddr >> 4; + info->cmdring.last_ptr[3] = 0; wmb(); - lastBatchVirtAddr[0] = (get_batch_command(BTYPE_CTRL) << 24) + info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) | (BEGIN_VALID_MASK); triggerHWCommandList(info); - info->cmdring._cmdRingOffset += 0x20; - info->cmdring._lastBatchStartAddr = flushBatchHWAddr; + info->cmdring.ring_offset += 0x20; + info->cmdring.last_ptr = flushBatchVirtAddr; } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 7f2c54ac..5b444cf3 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -59,11 +59,34 @@ typedef enum { } CMD_SIZE; struct xgi_cmdring_info { - unsigned int _cmdRingSize; - u32 _cmdRingBuffer; - unsigned long _cmdRingAllocOffset; - u32 _lastBatchStartAddr; - u32 _cmdRingOffset; + /** + * Kernel space pointer to the base of the command ring. + */ + u32 * ptr; + + /** + * Size, in bytes, of the command ring. + */ + unsigned int size; + + /** + * Base address of the command ring from the hardware's PoV. + */ + unsigned int ring_hw_base; + + /** + * Offset, in bytes, from the base of PCI-e GART space to the start + * of the ring. + */ + unsigned long ring_gart_base; + + u32 * last_ptr; + + /** + * Offset, in bytes, from the start of the ring to the next available + * location to store a command. + */ + unsigned int ring_offset; }; struct xgi_info; -- cgit v1.2.3 From 699207cf2fa0c5255365dd28eeb3dd760f362818 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 21:37:45 -0700 Subject: Remove some extraneous debug messages. 
--- linux-core/xgi_cmdlist.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 53bada50..98d80ef0 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -97,8 +97,6 @@ static void xgi_submit_cmdlist(struct xgi_info * info, /* Enable PCI Trigger Mode */ - DRM_INFO("Enable PCI Trigger Mode \n"); - dwWriteReg(info->mmio_map, BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | @@ -113,9 +111,6 @@ static void xgi_submit_cmdlist(struct xgi_info * info, /* Send PCI begin command */ - DRM_INFO("portOffset=%d, beginPort=%d\n", - portOffset, cmd << 2); - dwWriteReg(info->mmio_map, portOffset, begin[0]); dwWriteReg(info->mmio_map, portOffset + 4, begin[1]); dwWriteReg(info->mmio_map, portOffset + 8, begin[2]); -- cgit v1.2.3 From 94203840fe53edaf1556d1a0a8a27773c24a7fc5 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Sat, 21 Jul 2007 23:00:01 -0700 Subject: Bump version. --- linux-core/xgi_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 0aab04d8..16102950 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -36,10 +36,10 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070710" +#define DRIVER_DATE "20070721" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 8 +#define DRIVER_MINOR 9 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" -- cgit v1.2.3 From 7e6d08f670a55d79ee037144aa29104e4e8fc700 Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Mon, 23 Jul 2007 18:15:00 +1000 Subject: drm_rmmap_ioctl(): remove dead code This patch removes some obviously dead code spotted by the Coverity checker. 
Signed-off-by: Adrian Bunk --- linux-core/drm_bufs.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index 665bc65d..f9987ca6 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -467,11 +467,6 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - if (!map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - /* Register and framebuffer maps are permanent */ if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From 2097d743f287d994bd001baedd39e3bc808999c6 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 23 Jul 2007 13:26:28 -0700 Subject: Eliminate XGI_CHECK_PCI_CONFIG. Based on review comments from airlied, XGI_CHECK_PCI_CONFIG is removed. He believes (and I tend to agree) that this is a largely unnecessary workaround for a bug elsewhere. --- linux-core/xgi_drv.c | 38 -------------------------------------- 1 file changed, 38 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index bcb6946d..19a9a10d 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -139,40 +139,6 @@ MODULE_LICENSE("GPL and additional rights"); void xgi_kern_isr_bh(struct drm_device *dev); -/* - * verify access to pci config space wasn't disabled behind our back - * unfortunately, XFree86 enables/disables memory access in pci config space at - * various times (such as restoring initial pci config space settings during vt - * switches or when doing mulicard). As a result, all of our register accesses - * are garbage at this point. add a check to see if access was disabled and - * reenable any such access. 
- */ -#define XGI_CHECK_PCI_CONFIG(xgi) \ - xgi_check_pci_config(xgi, __LINE__) - -static inline void xgi_check_pci_config(struct xgi_info * info, int line) -{ - u16 cmd; - bool flag = 0; - - pci_read_config_word(info->dev->pdev, PCI_COMMAND, &cmd); - if (!(cmd & PCI_COMMAND_MASTER)) { - DRM_INFO("restoring bus mastering! (%d)\n", line); - cmd |= PCI_COMMAND_MASTER; - flag = 1; - } - - if (!(cmd & PCI_COMMAND_MEMORY)) { - DRM_INFO("restoring MEM access! (%d)\n", line); - cmd |= PCI_COMMAND_MEMORY; - flag = 1; - } - - if (flag) - pci_write_config_word(info->dev->pdev, PCI_COMMAND, cmd); -} - - int xgi_bootstrap(DRM_IOCTL_ARGS) { DRM_DEVICE; @@ -262,8 +228,6 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) //DRM_INFO("xgi_kern_isr \n"); - //XGI_CHECK_PCI_CONFIG(info); - //xgi_dvi_irq_handler(info); if (need_to_run_bottom_half) { @@ -280,8 +244,6 @@ void xgi_kern_isr_bh(struct drm_device *dev) DRM_INFO("xgi_kern_isr_bh \n"); //xgi_dvi_irq_handler(info); - - XGI_CHECK_PCI_CONFIG(info); } int xgi_driver_load(struct drm_device *dev, unsigned long flags) -- cgit v1.2.3 From 03e932e32be6ae3de6994c6893c813a34623ad7d Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 23 Jul 2007 15:11:12 -0700 Subject: linux: Make DRM_IOCTL_GET_CLIENT return EINVAL when it can't find client #idx. Fixes the getclient test and dritest -c. 
--- linux-core/drm_ioctl.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index f3f757da..9d52fd8a 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -229,26 +229,23 @@ int drm_getclient(struct drm_device *dev, void *data, idx = client->idx; mutex_lock(&dev->struct_mutex); - - if (list_empty(&dev->filelist)) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } i = 0; list_for_each_entry(pt, &dev->filelist, lhead) { - if (i++ >= idx) - break; + if (i++ >= idx) { + client->auth = pt->authenticated; + client->pid = pt->pid; + client->uid = pt->uid; + client->magic = pt->magic; + client->iocs = pt->ioctl_count; + mutex_unlock(&dev->struct_mutex); + + return 0; + } } - - client->auth = pt->authenticated; - client->pid = pt->pid; - client->uid = pt->uid; - client->magic = pt->magic; - client->iocs = pt->ioctl_count; mutex_unlock(&dev->struct_mutex); - return 0; + return -EINVAL; } /** -- cgit v1.2.3 From 388a2c54eea7575a5b046da3df09f7a1c63551d6 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 23 Jul 2007 18:50:07 -0700 Subject: Minor log message clean up. 
--- linux-core/xgi_cmdlist.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 98d80ef0..f5fc1b94 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -116,7 +116,7 @@ static void xgi_submit_cmdlist(struct xgi_info * info, dwWriteReg(info->mmio_map, portOffset + 8, begin[2]); dwWriteReg(info->mmio_map, portOffset + 12, begin[3]); } else { - DRM_INFO("info->cmdring.last_ptr != NULL\n"); + DRM_DEBUG("info->cmdring.last_ptr != NULL\n"); if (pCmdInfo->type == BTYPE_3D) { addFlush2D(info); @@ -132,7 +132,6 @@ static void xgi_submit_cmdlist(struct xgi_info * info, } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); - DRM_INFO("%s: exit\n", __func__); } @@ -172,19 +171,17 @@ int xgi_state_change(struct xgi_info * info, unsigned int to, #define STATE_SHUTDOWN 5 if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) { - DRM_INFO("[kd] I see, now is to leaveVT\n"); - // stop to received batch + DRM_INFO("Leaving graphical mode (probably VT switch)\n"); } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) { - DRM_INFO("[kd] I see, now is to enterVT\n"); + DRM_INFO("Entering graphical mode (probably VT switch)\n"); xgi_cmdlist_reset(info); } else if ((from == STATE_GRAPHIC) && ((to == STATE_LOGOUT) || (to == STATE_REBOOT) || (to == STATE_SHUTDOWN))) { - DRM_INFO("[kd] I see, not is to exit from X\n"); - // stop to received batch + DRM_INFO("Leaving graphical mode (probably X shutting down)\n"); } else { - DRM_ERROR("[kd] Should not happen\n"); + DRM_ERROR("Invalid state change.\n"); return DRM_ERR(EINVAL); } -- cgit v1.2.3 From 46214fc3979ed60b32289ade1b8efbba1c8bf732 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 23 Jul 2007 18:50:52 -0700 Subject: Minor log message clean up. 
--- linux-core/xgi_regs.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index b211626a..f5311c1e 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -76,9 +76,10 @@ static inline u8 IN3CFB(struct drm_map * map, u8 index) */ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) { +#ifdef XGI_MMIO_DEBUG DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", map->handle, addr, data); - +#endif DRM_WRITE32(map, addr, data); } -- cgit v1.2.3 From 887cb31ee9ec04e45829500f095aa4a3bc1095ea Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 13:27:44 -0700 Subject: Fix bug preventing X server from restarting. The core DRM lastclose routine automatically destroys all mappings and releases SG memory. XP10 DRM and DDX assumed this data stayed around until module unload. xgi_bootstrap was reworked to recreate all these mappings. In addition, the drm_addmap for the GART backing store was moved into the kernel. This causes a change to the ioctl protocol and a version bump. 
--- linux-core/xgi_drv.c | 126 ++++++++++++++++++++++++++++++++++----------------- linux-core/xgi_drv.h | 4 +- 2 files changed, 87 insertions(+), 43 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 19a9a10d..11d6e950 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -64,6 +64,7 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static int xgi_driver_load(struct drm_device *dev, unsigned long flags); static int xgi_driver_unload(struct drm_device *dev); static void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp); +static void xgi_driver_lastclose(drm_device_t * dev); static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); @@ -75,6 +76,7 @@ static struct drm_driver driver = { .load = xgi_driver_load, .unload = xgi_driver_unload, .preclose = xgi_driver_preclose, + .lastclose = xgi_driver_lastclose, .dma_quiescent = NULL, .irq_preinstall = NULL, .irq_postinstall = NULL, @@ -144,26 +146,25 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) DRM_DEVICE; struct xgi_info *info = dev->dev_private; struct xgi_bootstrap bs; + struct drm_map_list *maplist; int err; DRM_COPY_FROM_USER_IOCTL(bs, (struct xgi_bootstrap __user *) data, sizeof(bs)); - if (info->bootstrap_done) { - return 0; - } + if (info->mmio_map == NULL) { + err = drm_addmap(dev, info->mmio.base, info->mmio.size, + _DRM_REGISTERS, _DRM_KERNEL, + &info->mmio_map); + if (err) { + DRM_ERROR("Unable to map MMIO region: %d\n", err); + return err; + } - err = drm_addmap(dev, info->mmio.base, info->mmio.size, - _DRM_REGISTERS, _DRM_KERNEL, - &info->mmio_map); - if (err) { - DRM_ERROR("Unable to map MMIO region: %d\n", err); - return err; + xgi_enable_mmio(info); } - xgi_enable_mmio(info); - //xgi_enable_ge(info); info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024; @@ -172,38 +173,64 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) if ((info->fb.base == 0) || (info->fb.size == 0)) { - DRM_ERROR("frame buffer appears to be wrong: 0x%lx 0x%x\n", 
+ DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n", (unsigned long) info->fb.base, info->fb.size); return DRM_ERR(EINVAL); } /* Init the resource manager */ - err = xgi_fb_heap_init(info); - if (err) { - DRM_ERROR("xgi_fb_heap_init() failed\n"); - return err; + if (!info->fb_heap.initialized) { + err = xgi_fb_heap_init(info); + if (err) { + DRM_ERROR("Unable to initialize FB heap.\n"); + return err; + } } - - info->pcie.size = bs.gart_size * (1024 * 1024); + info->pcie.size = bs.gart.size; /* Init the resource manager */ - err = xgi_pcie_heap_init(info); - if (err) { - DRM_ERROR("xgi_pcie_heap_init() failed\n"); - return err; + if (!info->pcie_heap.initialized) { + err = xgi_pcie_heap_init(info); + if (err) { + DRM_ERROR("Unable to initialize GART heap.\n"); + return err; + } + + /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ + err = xgi_cmdlist_initialize(info, 0x100000); + if (err) { + DRM_ERROR("xgi_cmdlist_initialize() failed\n"); + return err; + } } - /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - err = xgi_cmdlist_initialize(info, 0x100000); - if (err) { - DRM_ERROR("xgi_cmdlist_initialize() failed\n"); - return err; + + if (info->pcie_map == NULL) { + err = drm_addmap(info->dev, 0, info->pcie.size, + _DRM_SCATTER_GATHER, _DRM_LOCKED, + & info->pcie_map); + if (err) { + DRM_ERROR("Could not add map for GART backing " + "store.\n"); + return err; + } } - info->bootstrap_done = 1; + + maplist = drm_find_matching_map(dev, info->pcie_map); + if (maplist == NULL) { + DRM_ERROR("Could not find GART backing store map.\n"); + return DRM_ERR(EINVAL); + } + + bs.gart = *info->pcie_map; + bs.gart.handle = (void *)(unsigned long) maplist->user_token; + DRM_COPY_TO_USER_IOCTL((struct xgi_bootstrap __user *) data, + bs, sizeof(bs)); + return 0; } @@ -217,6 +244,33 @@ void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp) } +void xgi_driver_lastclose(drm_device_t * dev) +{ + struct xgi_info * info = dev->dev_private; + + 
if (info != NULL) { + /* The core DRM lastclose routine will destroy all of our + * mappings for us. NULL out the pointers here so that + * xgi_bootstrap can do the right thing. + */ + info->pcie_map = NULL; + info->mmio_map = NULL; + info->fb_map = NULL; + + xgi_cmdlist_cleanup(info); + + if (info->fb_heap.initialized) { + xgi_mem_heap_cleanup(&info->fb_heap); + } + + if (info->pcie_heap.initialized) { + xgi_mem_heap_cleanup(&info->pcie_heap); + xgi_pcie_lut_cleanup(info); + } + } +} + + /* * driver receives an interrupt if someone waiting, then hand it off. */ @@ -298,23 +352,13 @@ int xgi_driver_unload(struct drm_device *dev) { struct xgi_info * info = dev->dev_private; - xgi_cmdlist_cleanup(info); - if (info->fb_map != NULL) { - drm_rmmap(info->dev, info->fb_map); - } - - if (info->mmio_map != NULL) { - drm_rmmap(info->dev, info->mmio_map); - } - - xgi_mem_heap_cleanup(&info->fb_heap); - xgi_mem_heap_cleanup(&info->pcie_heap); - xgi_pcie_lut_cleanup(info); - if (xgi_mem_block_cache) { kmem_cache_destroy(xgi_mem_block_cache); xgi_mem_block_cache = NULL; } + drm_free(info, sizeof(*info), DRM_MEM_DRIVER); + dev->dev_private = NULL; + return 0; } diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 16102950..ae5fe07e 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -36,10 +36,10 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070721" +#define DRIVER_DATE "20070723" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 9 +#define DRIVER_MINOR 10 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" -- cgit v1.2.3 From 2ef2997ee38ff359c331b6a3febf194bd46e4962 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 13:29:29 -0700 Subject: Fix flags for serveral ioctls. 
--- linux-core/xgi_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 11d6e950..3b520850 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -42,7 +42,7 @@ static struct pci_device_id pciidlist[] = { static int xgi_bootstrap(DRM_IOCTL_ARGS); static drm_ioctl_desc_t xgi_ioctls[] = { - [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, [DRM_IOCTL_NR(DRM_XGI_FB_ALLOC)] = {xgi_fb_alloc_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_XGI_FB_FREE)] = {xgi_fb_free_ioctl, DRM_AUTH}, @@ -55,7 +55,7 @@ static drm_ioctl_desc_t xgi_ioctls[] = { [DRM_IOCTL_NR(DRM_XGI_DEBUG_INFO)] = {xgi_restore_registers_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_XGI_SUBMIT_CMDLIST)] = {xgi_submit_cmdlist_ioctl, DRM_AUTH}, [DRM_IOCTL_NR(DRM_XGI_TEST_RWINKERNEL)] = {xgi_test_rwinkernel_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER}, }; static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); -- cgit v1.2.3 From 8e64d2ae862d5fa02e23c68db6b55393e1f86005 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 13:36:02 -0700 Subject: Fix license formatting. 
--- linux-core/xgi_cmdlist.c | 46 ++++++++++++++++++++++------------------------ linux-core/xgi_cmdlist.h | 46 ++++++++++++++++++++++------------------------ linux-core/xgi_drv.c | 46 ++++++++++++++++++++++------------------------ linux-core/xgi_drv.h | 46 ++++++++++++++++++++++------------------------ linux-core/xgi_fb.c | 46 ++++++++++++++++++++++------------------------ linux-core/xgi_misc.c | 46 ++++++++++++++++++++++------------------------ linux-core/xgi_misc.h | 44 +++++++++++++++++++++----------------------- linux-core/xgi_pcie.c | 46 ++++++++++++++++++++++------------------------ linux-core/xgi_regs.h | 44 +++++++++++++++++++++----------------------- 9 files changed, 196 insertions(+), 214 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index f5fc1b94..e4f9dbcd 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. 
- * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "xgi_drv.h" diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 5b444cf3..604c9aac 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. 
* - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #ifndef _XGI_CMDLIST_H_ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 3b520850..2c3384b0 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. 
IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "drmP.h" diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index ae5fe07e..2061189a 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. 
+ * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #ifndef _XGI_DRV_H_ diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index a5885198..3d3b2ae0 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
+ * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "xgi_drv.h" diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 7f3d9d6e..5e8c3da8 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. 
+ * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "xgi_drv.h" diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index 10638b2d..af19a11a 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -1,29 +1,27 @@ - /**************************************************************************** * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
+ * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #ifndef _XGI_MISC_H_ diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 9dee888b..537e82f5 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. 
+ * * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #include "xgi_drv.h" diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index f5311c1e..34268a56 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -1,29 +1,27 @@ - /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan. - * * - * All Rights Reserved. * - * * + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation on the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial - * portions of the Software. - * * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
+ * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. ***************************************************************************/ #ifndef _XGI_REGS_H_ -- cgit v1.2.3 From 75a68635a8f7b0d4fb31031832cc282a39a4a1e7 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 15:53:50 -0700 Subject: Pass correct offset to xgi_find_pcie_virt. The wrong offset was being passed to xgi_find_pcie_virt. This would cause an oops in addFlush2D. 
--- linux-core/xgi_cmdlist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index e4f9dbcd..8ba8dc75 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -47,7 +47,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return err; } - info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.offset); + info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_offset); info->cmdring.size = mem_alloc.size; info->cmdring.ring_hw_base = mem_alloc.hw_addr; info->cmdring.ring_gart_base = mem_alloc.offset; -- cgit v1.2.3 From 2bafeb673f14b1e3799bf00817138c0b8211635e Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 24 Jul 2007 16:17:30 -0700 Subject: Fix typo on previous commit. Sigh... --- linux-core/xgi_cmdlist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 8ba8dc75..490e9f39 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -47,7 +47,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) return err; } - info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_offset); + info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr); info->cmdring.size = mem_alloc.size; info->cmdring.ring_hw_base = mem_alloc.hw_addr; info->cmdring.ring_gart_base = mem_alloc.offset; -- cgit v1.2.3 From e2d00715cc1d3919aadd5066ef7355049f417bd2 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 26 Jul 2007 13:25:21 +1000 Subject: drm: fix size argument for copy to/from user --- linux-core/drm_bo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 75d89e46..b47b657f 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1720,7 +1720,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr 
if (next != 0) { curuserarg = (void __user *)next; if (copy_from_user(&curarg, curuserarg, - sizeof(arg)) != 0) + sizeof(curarg)) != 0) return -EFAULT; arg = &curarg; } @@ -1768,7 +1768,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr arg->d.rep.bo_info = rep; if (arg != data) { if (copy_to_user(curuserarg, &curarg, - sizeof(arg)) != 0) + sizeof(curarg)) != 0) return -EFAULT; } } while (next != 0); -- cgit v1.2.3 From 4175dd818110bd10d6d09190d30c271e89202b18 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 26 Jul 2007 15:26:36 +1000 Subject: drm/bo: set the req pointer for each buffer to validate --- linux-core/drm_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b47b657f..467d03ff 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1729,7 +1729,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr next = arg->next; continue; } - + req = &arg->d.req; ret = 0; switch (req->op) { case drm_bo_validate: -- cgit v1.2.3 From cf2d569daca6954d11a796f4d110148ae2e0c827 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 26 Jul 2007 10:14:17 -0700 Subject: Replace NO_MOVE/NO_EVICT flags to buffer objects with an ioctl to set pinning. This cleans up the create/validate interfaces for this very uncommon path, and makes pinned object creation much easier to use for the X Server. 
--- linux-core/drm_bo.c | 169 +++++++++++++++++++++++++++++++++-------------- linux-core/drm_drv.c | 1 + linux-core/drm_objects.h | 3 +- 3 files changed, 122 insertions(+), 51 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 75d89e46..53885a3e 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -80,8 +80,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo) DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); - if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) - || bo->mem.mem_type != bo->pinned_mem_type) { + if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) { man = &bo->dev->bm.man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); } else { @@ -733,7 +732,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + BUG_ON(entry->pinned); ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); @@ -893,18 +892,6 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, DRM_ERROR("User buffers are not supported yet\n"); return -EINVAL; } - if (bo->type == drm_bo_type_fake && - !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { - DRM_ERROR("Fake buffers must be pinned.\n"); - return -EINVAL; - } - - if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { - DRM_ERROR - ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " - "processes\n"); - return -EPERM; - } new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -1382,6 +1369,12 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, return ret; } + if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) { + DRM_ERROR("Attempt to validate pinned buffer into different memory " + "type\n"); + return -EINVAL; + } + /* * We're switching command submission mechanism, * or 
cannot simply rely on the hardware serializing for us. @@ -1425,37 +1418,6 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, } } - /* - * Pinned buffers. - */ - - if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { - bo->pinned_mem_type = bo->mem.mem_type; - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->pinned_lru); - drm_bo_add_to_pinned_lru(bo); - - if (bo->pinned_node != bo->mem.mm_node) { - if (bo->pinned_node != NULL) - drm_mm_put_block(bo->pinned_node); - bo->pinned_node = bo->mem.mm_node; - } - - mutex_unlock(&dev->struct_mutex); - - } else if (bo->pinned_node != NULL) { - - mutex_lock(&dev->struct_mutex); - - if (bo->pinned_node != bo->mem.mm_node) - drm_mm_put_block(bo->pinned_node); - - list_del_init(&bo->pinned_lru); - bo->pinned_node = NULL; - mutex_unlock(&dev->struct_mutex); - - } - /* * We might need to add a TTM. */ @@ -1533,6 +1495,10 @@ static int drm_bo_handle_validate(struct drm_file *file_priv, return ret; } +/** + * Fills out the generic buffer object ioctl reply with the information for + * the BO with id of handle. + */ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, struct drm_bo_info_rep *rep) { @@ -1948,6 +1914,110 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * return 0; } +/** + * Pins or unpins the given buffer object in the given memory area. + * + * Pinned buffers will not be evicted from or move within their memory area. + * Must be called with the hardware lock held for pinning. + */ +static int +drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, + int pin) +{ + int ret = 0; + + mutex_lock(&bo->mutex); + if (bo->pinned == pin) { + mutex_unlock(&bo->mutex); + return 0; + } + + if (pin) { + ret = drm_bo_wait_unfenced(bo, 0, 0); + if (ret) { + mutex_unlock(&bo->mutex); + return ret; + } + + /* Validate the buffer into its pinned location, with no pending + * fence. 
+ */ + ret = drm_buffer_object_validate(bo, 0, 0, 0); + if (ret) { + mutex_unlock(&bo->mutex); + return ret; + } + + /* Add our buffer to the pinned list */ + bo->pinned_mem_type = bo->mem.mem_type; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + drm_bo_add_to_pinned_lru(bo); + + if (bo->pinned_node != bo->mem.mm_node) { + if (bo->pinned_node != NULL) + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = bo->mem.mm_node; + } + + mutex_unlock(&dev->struct_mutex); + + } else { + mutex_lock(&dev->struct_mutex); + + /* Remove our buffer from the pinned list */ + if (bo->pinned_node != bo->mem.mm_node) + drm_mm_put_block(bo->pinned_node); + + list_del_init(&bo->pinned_lru); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + } + bo->pinned = pin; + mutex_unlock(&bo->mutex); + return 0; +} + +int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_bo_set_pin_arg *arg = data; + struct drm_bo_set_pin_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + struct drm_buffer_object *bo; + int ret; + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + if (req->pin < 0 || req->pin > 1) { + DRM_ERROR("Bad arguments to set_pin\n"); + return -EINVAL; + } + + if (req->pin) + LOCK_TEST_WITH_RETURN(dev, file_priv); + + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(file_priv, req->handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!bo) { + return -EINVAL; + } + + ret = drm_bo_set_pin(dev, bo, req->pin); + if (ret) { + drm_bo_usage_deref_unlocked(&bo); + return ret; + } + + drm_bo_fill_rep_arg(bo, rep); + drm_bo_usage_deref_unlocked(&bo); + + return 0; +} /** @@ -2009,11 +2079,10 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo, mutex_unlock(&dev->struct_mutex); } - if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { - DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " + if (bo->pinned) { + 
DRM_ERROR("A pinned buffer was present at " "cleanup. Removing flag and evicting.\n"); - bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; - bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; + bo->pinned = 0; } if (bo->mem.mem_type == mem_type) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index cc676bda..93dfcdb5 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -147,6 +147,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_SET_PIN, drm_bo_set_pin_ioctl, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index e5f2b69c..64f71651 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -362,6 +362,7 @@ struct drm_buffer_object { struct mutex mutex; /* For pinned buffers */ + int pinned; struct drm_mm_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; @@ -455,7 +456,7 @@ extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct d extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); - +int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -- cgit v1.2.3 From f9c27aa50b715a7d21858f1ce9e4785120bd0c36 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 26 Jul 2007 11:17:41 -0700 Subject: Copy the important parts of 
object_validate into object_create(). This should let us allocate buffers without holding the hardware lock. While here, add DRM_DEBUG info for the drm_bo ioctls, so you can see something more specific than just the cmd value per ioctl. --- linux-core/drm_bo.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 5 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 53885a3e..a2356c8a 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1565,6 +1565,7 @@ int drm_buffer_object_create(struct drm_device *dev, { struct drm_buffer_manager *bm = &dev->bm; struct drm_buffer_object *bo; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret = 0; unsigned long num_pages; @@ -1624,10 +1625,28 @@ int drm_buffer_object_create(struct drm_device *dev, if (ret) goto out_err; } - ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); + + bo->fence_class = 0; + ret = driver->fence_type(bo, &bo->fence_type); + if (ret) { + DRM_ERROR("Driver did not support given buffer permissions\n"); + goto out_err; + } + + if (bo->type == drm_bo_type_fake) { + ret = drm_bo_check_fake(dev, &bo->mem); + if (ret) + goto out_err; + } + + ret = drm_bo_add_ttm(bo); if (ret) goto out_err; + mutex_lock(&dev->struct_mutex); + drm_bo_add_to_lru(bo); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&bo->mutex); *buf_obj = bo; return 0; @@ -1677,6 +1696,8 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr void __user *curuserarg = NULL; int ret; + DRM_DEBUG("drm_bo_op_ioctl\n"); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1749,14 +1770,15 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_buffer_object *entry; int ret = 0; + DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n", + (int)(req->size / 1024), req->page_alignment * 4, 
req->type); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - - ret = drm_bo_lock_test(dev, file_priv); - if (ret) - goto out; + if (req->type == drm_bo_type_fake) + LOCK_TEST_WITH_RETURN(dev, file_priv); ret = drm_buffer_object_create(file_priv->head->dev, req->size, req->type, req->mask, @@ -1787,6 +1809,8 @@ int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *fi struct drm_user_object *uo; int ret = 0; + DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1810,6 +1834,9 @@ int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_p struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; + + DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1827,6 +1854,9 @@ int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file { struct drm_bo_handle_arg *arg = data; int ret; + + DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1845,6 +1875,8 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_user_object *uo; int ret; + DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1867,6 +1899,8 @@ int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_bo_handle_arg *arg = data; int ret = 0; + DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1883,6 +1917,8 @@ 
int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_info_rep *rep = &arg->d.rep; int ret; + DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1901,6 +1937,9 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; + + DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1987,6 +2026,9 @@ int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_buffer_object *bo; int ret; + DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n", + req->handle, req->pin); + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -2395,6 +2437,9 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; + DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n", + arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4)); + if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2449,6 +2494,8 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; + DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type); + if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2486,6 +2533,8 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; + DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type); + if (!driver) { DRM_ERROR("Buffer objects are not supported by this 
driver\n"); return -EINVAL; @@ -2508,6 +2557,8 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; + DRM_DEBUG("drm_mm_unlock_ioctl\n"); + if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; -- cgit v1.2.3 From 3c8ebd94e48589711f44d23e85d713a1ed980f37 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 26 Jul 2007 11:26:12 -0700 Subject: debug print ioctl return value as -integer rather than fffffwhatever. --- linux-core/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 93dfcdb5..816b8a20 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -648,7 +648,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, err_i1: atomic_dec(&dev->ioctl_count); if (retcode) - DRM_DEBUG("ret = %x\n", retcode); + DRM_DEBUG("ret = %d\n", retcode); return retcode; } EXPORT_SYMBOL(drm_ioctl); -- cgit v1.2.3 From b89cc0346500d9875d4acebc611db8f9ee3463f7 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 26 Jul 2007 16:58:59 -0700 Subject: Eliminate unnecessary (and now wrong) call gto drm_sg_free. --- linux-core/xgi_pcie.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 537e82f5..dc5a50b8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -121,10 +121,6 @@ static int xgi_pcie_lut_init(struct xgi_info * info) void xgi_pcie_lut_cleanup(struct xgi_info * info) { - if (info->dev->sg) { - drm_sg_free(info->dev, info->dev->sg->handle); - } - if (info->lut_handle) { drm_pci_free(info->dev, info->lut_handle); info->lut_handle = NULL; -- cgit v1.2.3 From c37ed9eca57a42b98cc67ca98dbf5135f5ab7aba Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 26 Jul 2007 17:01:16 -0700 Subject: Eliminate use of DRM_ERR. 
--- linux-core/xgi_cmdlist.c | 4 ++-- linux-core/xgi_drv.c | 10 +++++----- linux-core/xgi_fb.c | 8 ++++---- linux-core/xgi_pcie.c | 10 +++++----- 4 files changed, 16 insertions(+), 16 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 490e9f39..10ee9764 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -144,7 +144,7 @@ int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) sizeof(cmd_list)); if (cmd_list.type > BTYPE_CTRL) { - return DRM_ERR(EINVAL); + return -EINVAL; } xgi_submit_cmdlist(info, &cmd_list); @@ -180,7 +180,7 @@ int xgi_state_change(struct xgi_info * info, unsigned int to, DRM_INFO("Leaving graphical mode (probably X shutting down)\n"); } else { DRM_ERROR("Invalid state change.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } return 0; diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 2c3384b0..ec87df0a 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -173,7 +173,7 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) if ((info->fb.base == 0) || (info->fb.size == 0)) { DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n", (unsigned long) info->fb.base, info->fb.size); - return DRM_ERR(EINVAL); + return -EINVAL; } @@ -221,7 +221,7 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) maplist = drm_find_matching_map(dev, info->pcie_map); if (maplist == NULL) { DRM_ERROR("Could not find GART backing store map.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } bs.gart = *info->pcie_map; @@ -303,7 +303,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); if (!info) - return DRM_ERR(ENOMEM); + return -ENOMEM; (void) memset(info, 0, sizeof(*info)); dev->dev_private = info; @@ -322,7 +322,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) if ((info->mmio.base == 0) || (info->mmio.size == 0)) { DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n", (unsigned long) info->mmio.base, 
info->mmio.size); - return DRM_ERR(EINVAL); + return -EINVAL; } @@ -339,7 +339,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) SLAB_HWCACHE_ALIGN, NULL, NULL); if (xgi_mem_block_cache == NULL) { - return DRM_ERR(ENOMEM); + return -ENOMEM; } diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 3d3b2ae0..10343c13 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -45,7 +45,7 @@ int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start, block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); if (!block) { - return DRM_ERR(ENOMEM); + return -ENOMEM; } block->offset = start; @@ -189,11 +189,11 @@ int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, if (&block->list == &heap->used_list) { DRM_ERROR("can't find block: 0x%lx to free!\n", offset); - return DRM_ERR(ENOENT); + return -ENOENT; } if (block->filp != filp) { - return DRM_ERR(EPERM); + return -EPERM; } used_block = block; @@ -265,7 +265,7 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, alloc->location = XGI_MEMLOC_LOCAL; alloc->size = 0; DRM_ERROR("Video RAM allocation failed\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } else { DRM_INFO("Video RAM allocation succeeded: 0x%p\n", (char *)block->offset); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index dc5a50b8..4c369a2a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -85,7 +85,7 @@ static int xgi_pcie_lut_init(struct xgi_info * info) DMA_31BIT_MASK); if (info->lut_handle == NULL) { DRM_ERROR("cannot allocate PCIE lut page!\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } lut = info->lut_handle->vaddr; @@ -97,7 +97,7 @@ static int xgi_pcie_lut_init(struct xgi_info * info) DMA_BIDIRECTIONAL); if (dma_mapping_error(info->dev->sg->busaddr[i])) { DRM_ERROR("cannot map GART backing store for DMA!\n"); - return DRM_ERR(-(info->dev->sg->busaddr[i])); + return info->dev->sg->busaddr[i]; } lut[i] = info->dev->sg->busaddr[i]; @@ -184,7 +184,7 
@@ int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, alloc->location = XGI_MEMLOC_INVALID; alloc->size = 0; DRM_ERROR("PCIE RAM allocation failed\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } else { DRM_INFO("PCIE RAM allocation succeeded: offset = 0x%lx\n", block->offset); @@ -325,7 +325,7 @@ int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS) DRM_INFO("input GE HW addr is 0x%x\n", address); if (address == 0) { - return DRM_ERR(EFAULT); + return -EFAULT; } virtaddr = (u32 *)xgi_find_pcie_virt(info, address); @@ -337,7 +337,7 @@ int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS) *virtaddr = 0x00f00fff; DRM_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); } else { - return DRM_ERR(EFAULT); + return -EFAULT; } return 0; -- cgit v1.2.3 From cd51f131389297f923798daef6c734ba93f4422b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 27 Jul 2007 15:45:59 -0700 Subject: Convert to new ioctl interface between core DRM and device-specific module. --- linux-core/xgi_cmdlist.c | 36 +++++++++----------------------- linux-core/xgi_drv.c | 53 ++++++++++++++++++++++-------------------------- linux-core/xgi_drv.h | 48 ++++++++++++++++++++++++++----------------- linux-core/xgi_fb.c | 42 +++++++++++++------------------------- linux-core/xgi_misc.c | 14 ++++++------- linux-core/xgi_pcie.c | 52 ++++++++++++++++------------------------------- 6 files changed, 101 insertions(+), 144 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 10ee9764..1d0ee754 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -76,9 +76,12 @@ unsigned int get_batch_command(enum xgi_batch_type type) } -static void xgi_submit_cmdlist(struct xgi_info * info, - const struct xgi_cmd_info * pCmdInfo) +int xgi_submit_cmdlist(struct drm_device * dev, void * data, + struct drm_file * filp) { + struct xgi_info *const info = dev->dev_private; + const struct xgi_cmd_info *const pCmdInfo = + (struct xgi_cmd_info *) data; 
const unsigned int cmd = get_batch_command(pCmdInfo->type); u32 begin[4]; @@ -130,24 +133,6 @@ static void xgi_submit_cmdlist(struct xgi_info * info, } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); -} - - -int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - struct xgi_cmd_info cmd_list; - struct xgi_info *info = dev->dev_private; - - DRM_COPY_FROM_USER_IOCTL(cmd_list, - (struct xgi_cmd_info __user *) data, - sizeof(cmd_list)); - - if (cmd_list.type > BTYPE_CTRL) { - return -EINVAL; - } - - xgi_submit_cmdlist(info, &cmd_list); return 0; } @@ -187,16 +172,15 @@ int xgi_state_change(struct xgi_info * info, unsigned int to, } -int xgi_state_change_ioctl(DRM_IOCTL_ARGS) +int xgi_state_change_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; - struct xgi_state_info state; + struct xgi_state_info *const state = + (struct xgi_state_info *) data; struct xgi_info *info = dev->dev_private; - DRM_COPY_FROM_USER_IOCTL(state, (struct xgi_state_info __user *) data, - sizeof(state)); - return xgi_state_change(info, state._toState, state._fromState); + return xgi_state_change(info, state->_toState, state->_fromState); } diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index ec87df0a..0b094a31 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -37,23 +37,23 @@ static struct pci_device_id pciidlist[] = { xgi_PCI_IDS }; -static int xgi_bootstrap(DRM_IOCTL_ARGS); +static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); -static drm_ioctl_desc_t xgi_ioctls[] = { - [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, +static struct drm_ioctl_desc xgi_ioctls[] = { + DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - [DRM_IOCTL_NR(DRM_XGI_FB_ALLOC)] = {xgi_fb_alloc_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_FB_FREE)] = {xgi_fb_free_ioctl, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_XGI_FB_ALLOC, xgi_fb_alloc_ioctl, DRM_AUTH), 
+ DRM_IOCTL_DEF(DRM_XGI_FB_FREE, xgi_fb_free_ioctl, DRM_AUTH), - [DRM_IOCTL_NR(DRM_XGI_PCIE_ALLOC)] = {xgi_pcie_alloc_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_PCIE_FREE)] = {xgi_pcie_free_ioctl, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_XGI_PCIE_ALLOC, xgi_pcie_alloc_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_PCIE_FREE, xgi_pcie_free_ioctl, DRM_AUTH), - [DRM_IOCTL_NR(DRM_XGI_GE_RESET)] = {xgi_ge_reset_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_DUMP_REGISTER)] = {xgi_dump_register_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_DEBUG_INFO)] = {xgi_restore_registers_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_SUBMIT_CMDLIST)] = {xgi_submit_cmdlist_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_TEST_RWINKERNEL)] = {xgi_test_rwinkernel_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER}, + DRM_IOCTL_DEF(DRM_XGI_GE_RESET, xgi_ge_reset_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_DUMP_REGISTER, xgi_dump_register_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_DEBUG_INFO, xgi_restore_registers_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_TEST_RWINKERNEL, xgi_test_rwinkernel_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER), }; static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); @@ -61,8 +61,9 @@ static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static int xgi_driver_load(struct drm_device *dev, unsigned long flags); static int xgi_driver_unload(struct drm_device *dev); -static void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp); -static void xgi_driver_lastclose(drm_device_t * dev); +static void xgi_driver_preclose(struct drm_device * dev, + struct drm_file * filp); +static void xgi_driver_lastclose(struct drm_device * dev); static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); @@ -139,18 +140,15 @@ MODULE_LICENSE("GPL and additional 
rights"); void xgi_kern_isr_bh(struct drm_device *dev); -int xgi_bootstrap(DRM_IOCTL_ARGS) +int xgi_bootstrap(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; - struct xgi_bootstrap bs; + struct xgi_bootstrap * bs = (struct xgi_bootstrap *) data; struct drm_map_list *maplist; int err; - DRM_COPY_FROM_USER_IOCTL(bs, (struct xgi_bootstrap __user *) data, - sizeof(bs)); - if (info->mmio_map == NULL) { err = drm_addmap(dev, info->mmio.base, info->mmio.size, _DRM_REGISTERS, _DRM_KERNEL, @@ -187,7 +185,7 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) } - info->pcie.size = bs.gart.size; + info->pcie.size = bs->gart.size; /* Init the resource manager */ if (!info->pcie_heap.initialized) { @@ -224,16 +222,13 @@ int xgi_bootstrap(DRM_IOCTL_ARGS) return -EINVAL; } - bs.gart = *info->pcie_map; - bs.gart.handle = (void *)(unsigned long) maplist->user_token; - DRM_COPY_TO_USER_IOCTL((struct xgi_bootstrap __user *) data, - bs, sizeof(bs)); - + bs->gart = *info->pcie_map; + bs->gart.handle = (void *)(unsigned long) maplist->user_token; return 0; } -void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp) +void xgi_driver_preclose(struct drm_device * dev, struct drm_file * filp) { struct xgi_info * info = dev->dev_private; @@ -242,7 +237,7 @@ void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp) } -void xgi_driver_lastclose(drm_device_t * dev) +void xgi_driver_lastclose(struct drm_device * dev) { struct xgi_info * info = dev->dev_private; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 2061189a..8dec1fa1 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -52,7 +52,7 @@ struct xgi_mem_block { struct list_head list; unsigned long offset; unsigned long size; - DRMFILE filp; + struct drm_file * filp; unsigned int owner; }; @@ -111,7 +111,7 @@ extern struct kmem_cache *xgi_mem_block_cache; extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, unsigned long size, 
enum PcieOwner owner); extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - DRMFILE filp); + struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, unsigned int end); extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); @@ -119,34 +119,44 @@ extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); extern int xgi_fb_heap_init(struct xgi_info * info); extern int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - DRMFILE filp); + struct drm_file * filp); extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, - DRMFILE filp); + struct drm_file * filp); extern int xgi_pcie_heap_init(struct xgi_info * info); extern void xgi_pcie_lut_cleanup(struct xgi_info * info); extern int xgi_pcie_alloc(struct xgi_info * info, - struct xgi_mem_alloc * alloc, DRMFILE filp); + struct xgi_mem_alloc * alloc, struct drm_file * filp); extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, - DRMFILE filp); + struct drm_file * filp); extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); -extern void xgi_pcie_free_all(struct xgi_info *, DRMFILE); -extern void xgi_fb_free_all(struct xgi_info *, DRMFILE); - -extern int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS); -extern int xgi_fb_free_ioctl(DRM_IOCTL_ARGS); -extern int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS); -extern int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS); -extern int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS); -extern int xgi_dump_register_ioctl(DRM_IOCTL_ARGS); -extern int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS); -extern int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS); -extern int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS); -extern int xgi_state_change_ioctl(DRM_IOCTL_ARGS); +extern void xgi_pcie_free_all(struct xgi_info *, struct drm_file *); +extern void xgi_fb_free_all(struct xgi_info *, struct drm_file *); + +extern int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern 
int xgi_fb_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_dump_register_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_restore_registers_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_submit_cmdlist(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_test_rwinkernel_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_state_change_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); #endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 10343c13..9c60a874 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -94,7 +94,7 @@ struct xgi_mem_block *xgi_mem_new_node(void) block->offset = 0; block->size = 0; block->owner = PCIE_INVALID; - block->filp = (DRMFILE) -1; + block->filp = (struct drm_file *) -1; return block; } @@ -173,7 +173,7 @@ struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, } int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - DRMFILE filp) + struct drm_file * filp) { struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; @@ -246,7 +246,7 @@ int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - DRMFILE filp) + struct drm_file * filp) { struct xgi_mem_block *block; @@ -282,29 +282,19 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, } -int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS) +int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; - 
struct xgi_mem_alloc alloc; + struct xgi_mem_alloc *alloc = + (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - int err; - DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data, - sizeof(alloc)); - - err = xgi_fb_alloc(info, & alloc, filp); - if (err) { - return err; - } - - DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data, - alloc, sizeof(alloc)); - - return 0; + return xgi_fb_alloc(info, alloc, filp); } -int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +int xgi_fb_free(struct xgi_info * info, unsigned long offset, + struct drm_file * filp) { int err = 0; @@ -320,16 +310,12 @@ int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) } -int xgi_fb_free_ioctl(DRM_IOCTL_ARGS) +int xgi_fb_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; - u32 offset; - - DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data, - sizeof(offset)); - return xgi_fb_free(info, offset, filp); + return xgi_fb_free(info, *(u32 *) data, filp); } @@ -342,7 +328,7 @@ int xgi_fb_heap_init(struct xgi_info * info) /** * Free all blocks associated with a particular file handle. 
*/ -void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp) +void xgi_fb_free_all(struct xgi_info * info, struct drm_file * filp) { if (!info->fb_heap.initialized) { return; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 5e8c3da8..6c029782 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -27,9 +27,9 @@ #include "xgi_drv.h" #include "xgi_regs.h" -int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS) +int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; xgi_disable_ge(info); @@ -436,25 +436,23 @@ void xgi_dump_register(struct xgi_info * info) } -int xgi_dump_register_ioctl(DRM_IOCTL_ARGS) +int xgi_dump_register_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; xgi_dump_register(info); - return 0; } -int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS) +int xgi_restore_registers_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; OUT3X5B(info->mmio_map, 0x13, 0); OUT3X5B(info->mmio_map, 0x8b, 2); - return 0; } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 4c369a2a..b91471b8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -33,7 +33,7 @@ static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL; static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL; static int xgi_pcie_free_locked(struct xgi_info * info, - unsigned long offset, DRMFILE filp); + unsigned long offset, struct drm_file * filp); static int xgi_pcie_lut_init(struct xgi_info * info) { @@ -148,7 +148,7 @@ int xgi_pcie_heap_init(struct xgi_info * info) int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - DRMFILE filp) + struct drm_file * filp) { struct xgi_mem_block *block; @@ -199,32 +199,21 @@ int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, } -int 
xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS) +int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; - struct xgi_mem_alloc alloc; + struct xgi_mem_alloc *const alloc = + (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - int err; - - DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data, - sizeof(alloc)); - - err = xgi_pcie_alloc(info, & alloc, filp); - if (err) { - return err; - } - - DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data, - alloc, sizeof(alloc)); - return 0; + return xgi_pcie_alloc(info, alloc, filp); } /** * Free all blocks associated with a particular file handle. */ -void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp) +void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) { if (!info->pcie_heap.initialized) { return; @@ -252,8 +241,8 @@ void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp) } -int xgi_pcie_free_locked(struct xgi_info * info, - unsigned long offset, DRMFILE filp) +int xgi_pcie_free_locked(struct xgi_info * info, unsigned long offset, + struct drm_file * filp) { const bool isvertex = (xgi_pcie_vertex_block && (xgi_pcie_vertex_block->offset == offset)); @@ -266,7 +255,8 @@ int xgi_pcie_free_locked(struct xgi_info * info, } -int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) +int xgi_pcie_free(struct xgi_info * info, unsigned long offset, + struct drm_file * filp) { int err; @@ -282,16 +272,12 @@ int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp) } -int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS) +int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; - u32 offset; - - DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data, - sizeof(offset)); - return xgi_pcie_free(info, offset, filp); + return xgi_pcie_free(info, *(u32 *) data, filp); } @@ -312,15 +298,13 @@ void 
*xgi_find_pcie_virt(struct xgi_info * info, u32 address) /* address -- GE hw address */ -int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS) +int xgi_test_rwinkernel_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - DRM_DEVICE; struct xgi_info *info = dev->dev_private; - u32 address; + u32 address = *(u32 *) data; u32 *virtaddr = 0; - DRM_COPY_FROM_USER_IOCTL(address, (unsigned long __user *) data, - sizeof(address)); DRM_INFO("input GE HW addr is 0x%x\n", address); -- cgit v1.2.3 From 2ac80e79e424aa6577e556b2df01caea9e480852 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 30 Jul 2007 09:59:19 -0700 Subject: Use OUT3C5B macro instead of assuming little-endian byte order. --- linux-core/xgi_regs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 34268a56..b3a47f8e 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -130,7 +130,7 @@ static inline void xgi_enable_ge(struct xgi_info * info) int wait = 0; // Enable GE - DRM_WRITE16(info->mmio_map, 0x3C4, 0x9211); + OUT3C5B(info->mmio_map, 0x11, 0x92); // Save and close dynamic gating bOld3cf2a = IN3CFB(info->mmio_map, 0x2a); -- cgit v1.2.3 From 01628a430d476f5875270d7137fc083ba85cef90 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 30 Jul 2007 10:02:46 -0700 Subject: Use DRM_READ/DRM_WRITE macros instead of directly accessing MMIO space. 
--- linux-core/xgi_misc.c | 119 ++++++++++++++++++++++++++++---------------------- 1 file changed, 68 insertions(+), 51 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 6c029782..c75a5841 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -27,6 +27,8 @@ #include "xgi_drv.h" #include "xgi_regs.h" +#include + int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, struct drm_file * filp) { @@ -46,47 +48,43 @@ int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, static unsigned int s_invalid_begin = 0; -static bool xgi_validate_signal(volatile u8 *mmio_vbase) +static bool xgi_validate_signal(struct drm_map * map) { - volatile u32 *const ge_3d_status = - (volatile u32 *)(mmio_vbase + 0x2800); - const u32 old_ge_status = ge_3d_status[0x00]; - - if (old_ge_status & 0x001c0000) { + if (DRM_READ32(map, 0x2800) & 0x001c0000) { u16 check; /* Check Read back status */ - *(mmio_vbase + 0x235c) = 0x80; - check = *((volatile u16 *)(mmio_vbase + 0x2360)); + DRM_WRITE8(map, 0x235c, 0x80); + check = DRM_READ16(map, 0x2360); if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { return FALSE; } /* Check RO channel */ - *(mmio_vbase + 0x235c) = 0x83; - check = *((volatile u16 *)(mmio_vbase + 0x2360)); + DRM_WRITE8(map, 0x235c, 0x83); + check = DRM_READ16(map, 0x2360); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { return FALSE; } /* Check RW channel */ - *(mmio_vbase + 0x235c) = 0x88; - check = *((volatile u16 *)(mmio_vbase + 0x2360)); + DRM_WRITE8(map, 0x235c, 0x88); + check = DRM_READ16(map, 0x2360); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { return FALSE; } /* Check RO channel outstanding */ - *(mmio_vbase + 0x235c) = 0x8f; - check = *((volatile u16 *)(mmio_vbase + 0x2360)); + DRM_WRITE8(map, 0x235c, 0x8f); + check = DRM_READ16(map, 0x2360); if (0 != (check & 0x3ff)) { return FALSE; } /* Check RW channel outstanding */ - *(mmio_vbase + 0x235c) = 0x90; - check = *((volatile u16 *)(mmio_vbase + 
0x2360)); + DRM_WRITE8(map, 0x235c, 0x90); + check = DRM_READ16(map, 0x2360); if (0 != (check & 0x3ff)) { return FALSE; } @@ -98,14 +96,12 @@ static bool xgi_validate_signal(volatile u8 *mmio_vbase) } -static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) +static void xgi_ge_hang_reset(struct drm_map * map) { - volatile u32 *const ge_3d_status = - (volatile u32 *)(mmio_vbase + 0x2800); int time_out = 0xffff; - *(mmio_vbase + 0xb057) = 8; - while (0 != (ge_3d_status[0x00] & 0xf0000000)) { + DRM_WRITE8(map, 0xb057, 8); + while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) { while (0 != ((--time_out) & 0xfff)) /* empty */ ; @@ -116,57 +112,53 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase) u8 old_36; DRM_INFO("Can not reset back 0x%x!\n", - ge_3d_status[0x00]); + DRM_READ32(map, 0x2800)); - *(mmio_vbase + 0xb057) = 0; + DRM_WRITE8(map, 0xb057, 0); /* Have to use 3x5.36 to reset. */ /* Save and close dynamic gating */ - old_3ce = *(mmio_vbase + 0x3ce); - *(mmio_vbase + 0x3ce) = 0x2a; - old_3cf = *(mmio_vbase + 0x3cf); - *(mmio_vbase + 0x3cf) = old_3cf & 0xfe; + old_3ce = DRM_READ8(map, 0x3ce); + DRM_WRITE8(map, 0x3ce, 0x2a); + old_3cf = DRM_READ8(map, 0x3cf); + DRM_WRITE8(map, 0x3cf, old_3cf & 0xfe); /* Reset GE */ - old_index = *(mmio_vbase + 0x3d4); - *(mmio_vbase + 0x3d4) = 0x36; - old_36 = *(mmio_vbase + 0x3d5); - *(mmio_vbase + 0x3d5) = old_36 | 0x10; - + old_index = DRM_READ8(map, 0x3d4); + DRM_WRITE8(map, 0x3d4, 0x36); + old_36 = DRM_READ8(map, 0x3d5); + DRM_WRITE8(map, 0x3d5, old_36 | 0x10); + while (0 != ((--time_out) & 0xfff)) /* empty */ ; - *(mmio_vbase + 0x3d5) = old_36; - *(mmio_vbase + 0x3d4) = old_index; + DRM_WRITE8(map, 0x3d5, old_36); + DRM_WRITE8(map, 0x3d4, old_index); /* Restore dynamic gating */ - *(mmio_vbase + 0x3cf) = old_3cf; - *(mmio_vbase + 0x3ce) = old_3ce; + DRM_WRITE8(map, 0x3cf, old_3cf); + DRM_WRITE8(map, 0x3ce, old_3ce); break; } } - *(mmio_vbase + 0xb057) = 0; + DRM_WRITE8(map, 0xb057, 0); } bool xgi_ge_irq_handler(struct 
xgi_info * info) { - volatile u8 *const mmio_vbase = info->mmio_map->handle; - volatile u32 *const ge_3d_status = - (volatile u32 *)(mmio_vbase + 0x2800); - const u32 int_status = ge_3d_status[4]; + const u32 int_status = DRM_READ32(info->mmio_map, 0x2810); bool is_support_auto_reset = FALSE; /* Check GE on/off */ if (0 == (0xffffc0f0 & int_status)) { - u32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; - if (0 != (0x1000 & int_status)) { /* We got GE stall interrupt. */ - ge_3d_status[0x04] = int_status | 0x04000000; + DRM_WRITE32(info->mmio_map, 0x2810, + int_status | 0x04000000); if (is_support_auto_reset) { static cycles_t last_tick; @@ -174,7 +166,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) /* OE II is busy. */ - if (!xgi_validate_signal(mmio_vbase)) { + if (!xgi_validate_signal(info->mmio_map)) { /* Nothing but skip. */ } else if (0 == continue_int_count++) { last_tick = get_cycles(); @@ -189,13 +181,14 @@ bool xgi_ge_irq_handler(struct xgi_info * info) /* GE Hung up, need reset. */ DRM_INFO("Reset GE!\n"); - xgi_ge_hang_reset(mmio_vbase); + xgi_ge_hang_reset(info->mmio_map); } } } } else if (0 != (0x1 & int_status)) { s_invalid_begin++; - ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; + DRM_WRITE32(info->mmio_map, 0x2810, + (int_status & ~0x01) | 0x04000000); } return TRUE; @@ -456,14 +449,38 @@ int xgi_restore_registers_ioctl(struct drm_device * dev, void * data, return 0; } + +#define WHOLD_GE_STATUS 0x2800 + +/* Test everything except the "whole GE busy" bit, the "master engine busy" + * bit, and the reserved bits [26:21]. 
+ */ +#define IDLE_MASK ~((1U<<31) | (1U<<28) | (0x3f<<21)) + void xgi_waitfor_pci_idle(struct xgi_info * info) { -#define WHOLD_GE_STATUS 0x2800 -#define IDLE_MASK ~0x90200000 + unsigned int idleCount = 0; + u32 old_status = 0; + unsigned int same_count = 0; - int idleCount = 0; while (idleCount < 5) { - if (DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) & IDLE_MASK) { + const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) + & IDLE_MASK; + + if (status == old_status) { + same_count++; + + if ((same_count % 100) == 0) { + DRM_ERROR("GE status stuck at 0x%08x for %u iterations!\n", + old_status, same_count); + } + } else { + old_status = status; + same_count = 0; + } + + if (status != 0) { + msleep(1); idleCount = 0; } else { idleCount++; -- cgit v1.2.3 From 2fc697a7d270d57463eb5a16a0c65bd8e14c9893 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 30 Jul 2007 10:20:15 -0700 Subject: Fix GE shut-down sequence. When the GE is shut down, an empty command packet without a begin-link must be sent. After this command is sent, wait for the hardware to go idle. Finally, turn off the GE and disable MMIO. 
--- linux-core/xgi_cmdlist.c | 64 +++++++++++++++++++++++++++++++----------------- linux-core/xgi_drv.c | 8 ++++-- 2 files changed, 48 insertions(+), 24 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 1d0ee754..4bb147c4 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -29,7 +29,7 @@ #include "xgi_misc.h" #include "xgi_cmdlist.h" -static void addFlush2D(struct xgi_info * info); +static void xgi_emit_flush(struct xgi_info * info, bool link); static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(struct xgi_info * info); @@ -120,7 +120,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, DRM_DEBUG("info->cmdring.last_ptr != NULL\n"); if (pCmdInfo->type == BTYPE_3D) { - addFlush2D(info); + xgi_emit_flush(info, TRUE); } info->cmdring.last_ptr[1] = begin[1]; @@ -190,9 +190,18 @@ void xgi_cmdlist_reset(struct xgi_info * info) info->cmdring.ring_offset = 0; } + void xgi_cmdlist_cleanup(struct xgi_info * info) { if (info->cmdring.ring_hw_base != 0) { + /* If command lists have been issued, terminate the command + * list chain with a flush command. + */ + if (info->cmdring.last_ptr != NULL) { + xgi_emit_flush(info, FALSE); + xgi_waitfor_pci_idle(info); + } + xgi_pcie_free(info, info->cmdring.ring_gart_base, NULL); info->cmdring.ring_hw_base = 0; info->cmdring.ring_offset = 0; @@ -210,32 +219,43 @@ static void triggerHWCommandList(struct xgi_info * info) } -static void addFlush2D(struct xgi_info * info) +/** + * Emit a flush to the CRTL command stream. + * @info XGI info structure + * @link Emit (or don't emit) link information at start of flush command. + * + * This function assumes info->cmdring.ptr is non-NULL. 
+ */ +static void xgi_emit_flush(struct xgi_info * info, bool link) { - u32 *flushBatchVirtAddr; - u32 flushBatchHWAddr; + static const u32 flush_command[8] = { + (0x10 << 24), + BEGIN_LINK_ENABLE_MASK | (0x00004), + 0x00000000, 0x00000000, + + /* Flush everything with the default 32 clock delay. + */ + 0x003fffff, 0x003fffff, 0x003fffff, 0x003fffff + }; + const unsigned int base = (link) ? 0 : 4; + const unsigned int flush_size = (8 - base) * sizeof(u32); + u32 *batch_addr; + u32 hw_addr; /* check buf is large enough to contain a new flush batch */ - if ((info->cmdring.ring_offset + 0x20) >= info->cmdring.size) { + if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) { info->cmdring.ring_offset = 0; } - flushBatchHWAddr = info->cmdring.ring_hw_base + info->cmdring.ring_offset; - flushBatchVirtAddr = info->cmdring.ptr + hw_addr = info->cmdring.ring_hw_base + + info->cmdring.ring_offset; + batch_addr = info->cmdring.ptr + (info->cmdring.ring_offset / 4); - /* not using memcpy for I assume the address is discrete */ - *(flushBatchVirtAddr + 0) = 0x10000000; - *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */ - *(flushBatchVirtAddr + 2) = 0x00000000; - *(flushBatchVirtAddr + 3) = 0x00000000; - *(flushBatchVirtAddr + 4) = FLUSH_2D; - *(flushBatchVirtAddr + 5) = FLUSH_2D; - *(flushBatchVirtAddr + 6) = FLUSH_2D; - *(flushBatchVirtAddr + 7) = FLUSH_2D; - - info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; - info->cmdring.last_ptr[2] = flushBatchHWAddr >> 4; + (void) memcpy(batch_addr, & flush_command[base], flush_size); + + info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4); + info->cmdring.last_ptr[2] = hw_addr >> 4; info->cmdring.last_ptr[3] = 0; wmb(); info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) @@ -243,6 +263,6 @@ static void addFlush2D(struct xgi_info * info) triggerHWCommandList(info); - info->cmdring.ring_offset += 0x20; - info->cmdring.last_ptr = flushBatchVirtAddr; + 
info->cmdring.ring_offset += flush_size; + info->cmdring.last_ptr = (link) ? batch_addr : NULL; } diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 0b094a31..201062ee 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -242,6 +242,12 @@ void xgi_driver_lastclose(struct drm_device * dev) struct xgi_info * info = dev->dev_private; if (info != NULL) { + if (info->mmio_map != NULL) { + xgi_cmdlist_cleanup(info); + xgi_disable_ge(info); + xgi_disable_mmio(info); + } + /* The core DRM lastclose routine will destroy all of our * mappings for us. NULL out the pointers here so that * xgi_bootstrap can do the right thing. @@ -250,8 +256,6 @@ void xgi_driver_lastclose(struct drm_device * dev) info->mmio_map = NULL; info->fb_map = NULL; - xgi_cmdlist_cleanup(info); - if (info->fb_heap.initialized) { xgi_mem_heap_cleanup(&info->fb_heap); } -- cgit v1.2.3 From 08919d8a70558dc61c430be5ed6e4a2bed7429b2 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 30 Jul 2007 12:01:52 -0700 Subject: Move additional GE initialization into the kernel. This code comes directly from the X server. --- linux-core/xgi_drv.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 201062ee..33b3a51d 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -138,6 +138,57 @@ MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); +void xgi_engine_init(struct xgi_info * info) +{ + u8 temp; + + + OUT3C5B(info->mmio_map, 0x11, 0x92); + + /* -------> copy from OT2D + * PCI Retry Control Register. + * disable PCI read retry & enable write retry in mem. (10xx xxxx)b + */ + temp = IN3X5B(info->mmio_map, 0x55); + OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80); + + xgi_enable_ge(info); + + /* Enable linear addressing of the card. 
*/ + temp = IN3X5B(info->mmio_map, 0x21); + OUT3X5B(info->mmio_map, 0x21, temp | 0x20); + + /* Enable 32-bit internal data path */ + temp = IN3X5B(info->mmio_map, 0x2A); + OUT3X5B(info->mmio_map, 0x2A, temp | 0x40); + + /* Enable PCI burst write ,disable burst read and enable MMIO. */ + /* + * 0x3D4.39 Enable PCI burst write, disable burst read and enable MMIO. + * 7 ---- Pixel Data Format 1: big endian 0: little endian + * 6 5 4 3---- Memory Data with Big Endian Format, BE[3:0]# with Big Endian Format + * 2 ---- PCI Burst Write Enable + * 1 ---- PCI Burst Read Enable + * 0 ---- MMIO Control + */ + temp = IN3X5B(info->mmio_map, 0x39); + OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd); + + /* enable GEIO decode */ + /* temp = IN3X5B(info->mmio_map, 0x29); + * OUT3X5B(info->mmio_map, 0x29, temp | 0x08); + */ + + /* Enable graphic engine I/O PCI retry function*/ + /* temp = IN3X5B(info->mmio_map, 0x62); + * OUT3X5B(info->mmio_map, 0x62, temp | 0x50); + */ + + /* protect all register except which protected by 3c5.0e.7 */ + /* OUT3C5B(info->mmio_map, 0x11, 0x87); */ +} + + void xgi_kern_isr_bh(struct drm_device *dev); int xgi_bootstrap(struct drm_device * dev, void * data, @@ -159,6 +210,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, } xgi_enable_mmio(info); + xgi_engine_init(info); } -- cgit v1.2.3 From 283eaa25594347267df4e6e5eedbb9d17bb3682c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 31 Jul 2007 09:22:45 +1000 Subject: drm: fix fencing refcount error This extra increase was causing fence leaks on my system, due to create/user add already increasing it twice no need for a 3rd go. 
--- linux-core/drm_fence.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index c4f7da15..fdb2a4db 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -597,7 +597,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * * usage > 0. No need to lock dev->struct_mutex; */ - atomic_inc(&fence->usage); arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); @@ -830,7 +829,7 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file DRM_FENCE_FLAG_SHAREABLE); if (ret) return ret; - atomic_inc(&fence->usage); + arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); -- cgit v1.2.3 From c395d27a725f170645704bfc0d27b1e935b53c83 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 25 Jul 2007 14:32:15 +1000 Subject: drm/fence: shut up lockdep --- linux-core/drm_fence.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index fdb2a4db..2f16f7ef 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -520,9 +520,10 @@ void drm_fence_manager_init(struct drm_device * dev) struct drm_fence_class_manager *class; struct drm_fence_driver *fed = dev->driver->fence_driver; int i; + unsigned long flags; rwlock_init(&fm->lock); - write_lock(&fm->lock); + write_lock_irqsave(&fm->lock, flags); fm->initialized = 0; if (!fed) goto out_unlock; @@ -541,7 +542,7 @@ void drm_fence_manager_init(struct drm_device * dev) atomic_set(&fm->count, 0); out_unlock: - write_unlock(&fm->lock); + write_unlock_irqrestore(&fm->lock, flags); } void drm_fence_manager_takedown(struct drm_device * dev) -- cgit v1.2.3 From f83000c8b388f18f677238b9342fd6a7e262394b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 31 Jul 2007 17:27:00 -0700 Subject: Refactor register dumping code. 
--- linux-core/xgi_misc.c | 183 ++++++++++---------------------------------------- 1 file changed, 35 insertions(+), 148 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index c75a5841..84d1d4f2 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -254,178 +254,65 @@ bool xgi_dvi_irq_handler(struct xgi_info * info) } -void xgi_dump_register(struct xgi_info * info) +static void dump_reg_header(unsigned regbase) { - int i, j; - unsigned char temp; - - // 0x3C5 - printk("\r\n=====xgi_dump_register========0x%x===============\r\n", - 0x3C5); - - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x10; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = IN3C5B(info->mmio_map, i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - // 0x3D5 - printk("\r\n====xgi_dump_register=========0x%x===============\r\n", - 0x3D5); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x10; i++) { - printk("%1x ", i); - - for (j = 0; j < 0x10; j++) { - temp = IN3X5B(info->mmio_map, i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - // 0x3CF - printk("\r\n=========xgi_dump_register====0x%x===============\r\n", - 0x3CF); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); + printk("\n=====xgi_dump_register========0x%x===============\n", + regbase); + printk(" 0 1 2 3 4 5 6 7 8 9 a b c d e f\n"); +} - for (i = 0; i < 0x10; i++) { - printk("%1x ", i); - for (j = 0; j < 0x10; j++) { - temp = IN3CFB(info->mmio_map, i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n=====xgi_dump_register======0x%x===============\r\n", - 0xB000); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - 
printk("%3x", i); - } - } - printk("\r\n"); +static void dump_indexed_reg(struct xgi_info * info, unsigned regbase) +{ + unsigned i, j; + u8 temp; - for (i = 0; i < 0x5; i++) { - printk("%1x ", i); - for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0xB000 + i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2200); + dump_reg_header(regbase); for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0xB; i++) { printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0x2200 + i * 0x10 + j); + DRM_WRITE8(info->mmio_map, regbase - 1, + (i * 0x10) + j); + temp = DRM_READ8(info->mmio_map, regbase); printk("%3x", temp); } - printk("\r\n"); - } - - printk("\r\n==================0x%x===============\r\n", 0x2300); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } + printk("\n"); } - printk("\r\n"); +} - for (i = 0; i < 0x7; i++) { - printk("%1x ", i); - for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0x2300 + i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } +static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range) +{ + unsigned i, j; - printk("\r\n==================0x%x===============\r\n", 0x2400); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - } else { - printk("%3x", i); - } - } - printk("\r\n"); - for (i = 0; i < 0x10; i++) { + dump_reg_header(regbase); + for (i = 0; i < range; i++) { printk("%1x ", i); for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0x2400 + i * 0x10 + j); + u8 temp = DRM_READ8(info->mmio_map, + regbase + (i * 0x10) + j); printk("%3x", temp); } - printk("\r\n"); + printk("\n"); } +} - printk("\r\n==================0x%x===============\r\n", 0x2800); - for (i = 0; i < 0x10; i++) { - if (i == 0) { - printk("%5x", i); - 
} else { - printk("%3x", i); - } - } - printk("\r\n"); - - for (i = 0; i < 0x10; i++) { - printk("%1x ", i); - for (j = 0; j < 0x10; j++) { - temp = DRM_READ8(info->mmio_map, 0x2800 + i * 0x10 + j); - printk("%3x", temp); - } - printk("\r\n"); - } +void xgi_dump_register(struct xgi_info * info) +{ + dump_indexed_reg(info, 0x3c5); + dump_indexed_reg(info, 0x3d5); + dump_indexed_reg(info, 0x3cf); + + dump_reg(info, 0xB000, 0x05); + dump_reg(info, 0x2200, 0x0B); + dump_reg(info, 0x2300, 0x07); + dump_reg(info, 0x2400, 0x10); + dump_reg(info, 0x2800, 0x10); } -- cgit v1.2.3 From 7602e4f8a67d777437502672b4f74d9b990535ce Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 2 Aug 2007 19:13:45 +1000 Subject: drm: add unlocked ioctl code path - not used yet --- linux-core/drmP.h | 2 ++ linux-core/drm_drv.c | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 2b7e0a44..a61efcff 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -911,6 +911,8 @@ extern void drm_exit(struct drm_driver *driver); extern void drm_cleanup_pci(struct pci_dev *pdev); extern int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +extern long drm_unlocked_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg); extern long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index cc676bda..bb15987e 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -573,6 +573,12 @@ static int drm_version(struct drm_device *dev, void *data, */ int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) +{ + return drm_unlocked_ioctl(filp, cmd, arg); +} +EXPORT_SYMBOL(drm_ioctl); + +long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct drm_file *file_priv = filp->private_data; struct drm_device *dev = 
file_priv->head->dev; @@ -650,7 +656,7 @@ err_i1: DRM_DEBUG("ret = %x\n", retcode); return retcode; } -EXPORT_SYMBOL(drm_ioctl); +EXPORT_SYMBOL(drm_unlocked_ioctl); drm_local_map_t *drm_getsarea(struct drm_device *dev) { -- cgit v1.2.3 From cf4f1a85af69c2c2e5ba9c822d30863f16ce6821 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 2 Aug 2007 13:51:55 -0700 Subject: Add a couple of doxygen comments from reading the code. --- linux-core/drm_bo.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index a2356c8a..4ce5f480 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -705,6 +705,10 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, return ret; } +/** + * Repeatedly evict memory from the LRU for @mem_type until we create enough + * space, or we've evicted everything and there isn't enough space. + */ static int drm_bo_mem_force_space(struct drm_device * dev, struct drm_bo_mem_reg * mem, uint32_t mem_type, int no_wait) @@ -791,6 +795,14 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, return 1; } +/** + * Creates space for memory region @mem according to its type. + * + * This function first searches for free space in compatible memory types in + * the priority order defined by the driver. If free space isn't found, then + * drm_bo_mem_force_space is attempted in priority order to evict and find + * space. + */ int drm_bo_mem_space(struct drm_buffer_object * bo, struct drm_bo_mem_reg * mem, int no_wait) { @@ -2406,8 +2418,7 @@ int drm_bo_driver_init(struct drm_device * dev) * Initialize the system memory buffer type. * Other types need to be driver / IOCTL initialized. 
*/ - - ret = drm_bo_init_mm(dev, 0, 0, 0); + ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0); if (ret) goto out_unlock; -- cgit v1.2.3 From 3a0bc518e35c62bb9c64c9105f836584d949653f Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 2 Aug 2007 14:08:04 -0700 Subject: Remove the pinned buffer from the LRU when pinning. Also, be a little safer with setting the pinned flag within the struct lock. I'm not 100% sure if this is required, but it seems like it might be. --- linux-core/Makefile | 1 + linux-core/drm_bo.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/Makefile b/linux-core/Makefile index 1758777c..46c821a4 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -268,6 +268,7 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \ ifneq ($(PAGE_AGP),0) EXTRA_CFLAGS += -DHAVE_PAGE_AGP endif +EXTRA_CFLAGS += -g -O0 # Start with all modules turned off. CONFIG_DRM_GAMMA := n diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 4ce5f480..53fb5afc 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1990,8 +1990,8 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, return ret; } - /* Validate the buffer into its pinned location, with no pending - * fence. + /* Validate the buffer into its pinned location, with no + * pending fence. 
*/ ret = drm_buffer_object_validate(bo, 0, 0, 0); if (ret) { @@ -1999,9 +1999,12 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, return ret; } - /* Add our buffer to the pinned list */ + /* Pull the buffer off of the LRU and add it to the pinned + * list + */ bo->pinned_mem_type = bo->mem.mem_type; mutex_lock(&dev->struct_mutex); + list_del_init(&bo->lru); list_del_init(&bo->pinned_lru); drm_bo_add_to_pinned_lru(bo); @@ -2011,6 +2014,7 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, bo->pinned_node = bo->mem.mm_node; } + bo->pinned = pin; mutex_unlock(&dev->struct_mutex); } else { @@ -2022,9 +2026,9 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, list_del_init(&bo->pinned_lru); bo->pinned_node = NULL; + bo->pinned = pin; mutex_unlock(&dev->struct_mutex); } - bo->pinned = pin; mutex_unlock(&bo->mutex); return 0; } -- cgit v1.2.3 From beaa0c9a28b30a6ba3292184d04875b6a597e433 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 03:40:43 +1000 Subject: nouveau: Pass channel struct around instead of channel id. --- linux-core/nouveau_sgdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index 0ddac952..6393a469 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -211,7 +211,7 @@ nouveau_sgdma_init(struct drm_device *dev) obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; } - if ((ret = nouveau_gpuobj_new(dev, -1, obj_size, 16, + if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, NVOBJ_FLAG_ALLOW_NO_REFS | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { -- cgit v1.2.3 From 97770db72040dc032130413e0cdabc1777560a75 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 21:45:18 +1000 Subject: nouveau: Various internal and external API changes 1. DRM_NOUVEAU_GPUOBJ_FREE Used to free GPU objects. 
The obvious usage case is for Gr objects, but notifiers can also be destroyed in the same way. GPU objects gain a destructor method and private data fields with this change, so other specialised cases (like notifiers) can be implemented on top of gpuobjs. 2. DRM_NOUVEAU_CHANNEL_FREE 3. DRM_NOUVEAU_CARD_INIT Ideally we'd do init during module load, but this isn't currently possible. Doing init during firstopen() is bad as X has a love of opening/closing the DRM many times during startup. Once the modesetting-101 branch is merged this can go away. IRQs are enabled in nouveau_card_init() now, rather than having the X server call drmCtlInstHandler(). We'll need this for when we give the kernel module its own channel. 4. DRM_NOUVEAU_GETPARAM Add CHIPSET_ID value, which will return the chipset id derived from NV_PMC_BOOT_0. 4. Use list_* in a few places, rather than home-brewed stuff. --- linux-core/drmP.h | 1 + linux-core/drm_irq.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index a61efcff..aa562225 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1075,6 +1075,7 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev, extern int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); +extern int drm_irq_install(struct drm_device *dev); extern int drm_irq_uninstall(struct drm_device *dev); extern void drm_driver_irq_preinstall(struct drm_device *dev); extern void drm_driver_irq_postinstall(struct drm_device *dev); diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index fe4316e0..25166b6f 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -80,7 +80,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions * before and after the installation. 
*/ -static int drm_irq_install(struct drm_device * dev) +int drm_irq_install(struct drm_device * dev) { int ret; unsigned long sh_flags = 0; @@ -140,6 +140,7 @@ static int drm_irq_install(struct drm_device * dev) return 0; } +EXPORT_SYMBOL(drm_irq_install); /** * Uninstall the IRQ handler. -- cgit v1.2.3 From cf04641bc61c8bc18101713a8d95ef98e6afae7f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 22:05:31 +1000 Subject: nouveau: Give DRM its own gpu channel If your card doesn't have working context switching, it is now broken. --- linux-core/Makefile.kernel | 2 +- linux-core/nouveau_sgdma.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 5aa589cd..3d00cbe6 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -22,7 +22,7 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ - nouveau_sgdma.o \ + nouveau_sgdma.o nouveau_dma.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index 6393a469..df970d11 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -69,7 +69,7 @@ nouveau_sgdma_clear(struct drm_ttm_backend *be) if (nvbe->is_bound) be->func->unbind(be); - for (d = 0; d < nvbe->pages_populated; d--) { + for (d = 0; d < nvbe->pages_populated; d++) { pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); -- cgit v1.2.3 From 92084c6e056a738308ff65f3fcd7411fd7d2995a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 22:11:18 +1000 Subject: Export some useful ttm functions to drivers. 
--- linux-core/drm_bo.c | 10 ++++++---- linux-core/drm_bo_move.c | 1 + linux-core/drm_objects.h | 16 ++++++++++++++++ 3 files changed, 23 insertions(+), 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 467d03ff..4c2b1541 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -517,7 +517,7 @@ static void drm_bo_base_deref_locked(struct drm_file * file_priv, drm_bo_usage_deref_locked(&bo); } -static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) +void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; struct drm_device *dev = tmp_bo->dev; @@ -530,6 +530,7 @@ static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) mutex_unlock(&dev->struct_mutex); } } +EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); /* * Note. The caller has to register (if applicable) @@ -1672,10 +1673,10 @@ int drm_buffer_object_create(struct drm_device *dev, drm_bo_usage_deref_unlocked(&bo); return ret; } +EXPORT_SYMBOL(drm_buffer_object_create); -static int drm_bo_add_user_object(struct drm_file *file_priv, - struct drm_buffer_object *bo, - int shareable) +int drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int shareable) { struct drm_device *dev = file_priv->head->dev; int ret; @@ -1694,6 +1695,7 @@ static int drm_bo_add_user_object(struct drm_file *file_priv, mutex_unlock(&dev->struct_mutex); return ret; } +EXPORT_SYMBOL(drm_bo_add_user_object); static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) { diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 5e21173c..1a613916 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -128,6 +128,7 @@ int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem, *virtual = addr; return 0; } +EXPORT_SYMBOL(drm_mem_reg_ioremap); /** * \c Unmap mapping obtained using drm_bo_ioremap diff --git 
a/linux-core/drm_objects.h b/linux-core/drm_objects.h index e5f2b69c..e34fdbc4 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -483,6 +483,17 @@ extern int drm_bo_mem_space(struct drm_buffer_object * bo, struct drm_bo_mem_reg * mem, int no_wait); extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced); +extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, + enum drm_bo_type type, uint64_t mask, + uint32_t hint, uint32_t page_alignment, + unsigned long buffer_start, + struct drm_buffer_object **bo); +extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, + unsigned long p_offset, unsigned long p_size); +extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type); +extern int drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int sharable); +extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); /* * Buffer object memory move helpers. @@ -502,6 +513,11 @@ extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, uint32_t fence_flags, struct drm_bo_mem_reg * new_mem); +extern int drm_mem_reg_ioremap(struct drm_device *dev, + struct drm_bo_mem_reg *mem, void **virtual); +extern void drm_mem_reg_iounmap(struct drm_device *dev, + struct drm_bo_mem_reg *mem, void *virtual); + #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ BUG_ON(!mutex_is_locked(_mutex) || \ -- cgit v1.2.3 From 8d5a8ebc316028f14666697cff33daddbe384bcd Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Aug 2007 22:32:36 +1000 Subject: nouveau: ouch, add nouveau_dma.[ch] files.. 
--- linux-core/nouveau_dma.c | 1 + linux-core/nouveau_dma.h | 1 + 2 files changed, 2 insertions(+) create mode 120000 linux-core/nouveau_dma.c create mode 120000 linux-core/nouveau_dma.h (limited to 'linux-core') diff --git a/linux-core/nouveau_dma.c b/linux-core/nouveau_dma.c new file mode 120000 index 00000000..f8e0bdc3 --- /dev/null +++ b/linux-core/nouveau_dma.c @@ -0,0 +1 @@ +../shared-core/nouveau_dma.c \ No newline at end of file diff --git a/linux-core/nouveau_dma.h b/linux-core/nouveau_dma.h new file mode 120000 index 00000000..a545e387 --- /dev/null +++ b/linux-core/nouveau_dma.h @@ -0,0 +1 @@ +../shared-core/nouveau_dma.h \ No newline at end of file -- cgit v1.2.3 From 997a9a738ec26cf0ef2c7dee5e30bb53bd11bf6c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 15:31:34 -0700 Subject: Eliminate allocation "owner" usage. --- linux-core/xgi_cmdlist.c | 1 - linux-core/xgi_drv.h | 22 +++------------------- linux-core/xgi_fb.c | 7 ++----- linux-core/xgi_pcie.c | 26 +------------------------- 4 files changed, 6 insertions(+), 50 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 4bb147c4..e0ca31f1 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -38,7 +38,6 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { struct xgi_mem_alloc mem_alloc = { .size = size, - .owner = PCIE_2D, }; int err; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 8dec1fa1..f771517d 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -34,11 +34,11 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070723" +#define DRIVER_DATE "20070806" #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 0 +#define DRIVER_PATCHLEVEL 1 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -53,8 +53,6 @@ struct xgi_mem_block { unsigned long offset; unsigned long size; struct drm_file * filp; - - unsigned int 
owner; }; struct xgi_mem_heap { @@ -93,23 +91,9 @@ struct xgi_info { struct xgi_cmdring_info cmdring; }; -enum PcieOwner { - PCIE_2D = 0, - /* - PCIE_3D should not begin with 1, - 2D alloc pcie memory will use owner 1. - */ - PCIE_3D = 11, /*vetex buf */ - PCIE_3D_CMDLIST = 12, - PCIE_3D_SCRATCHPAD = 13, - PCIE_3D_TEXTURE = 14, - PCIE_INVALID = 0x7fffffff -}; - - extern struct kmem_cache *xgi_mem_block_cache; extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long size, enum PcieOwner owner); + unsigned long size); extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 9c60a874..bbdebb57 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -93,7 +93,6 @@ struct xgi_mem_block *xgi_mem_new_node(void) block->offset = 0; block->size = 0; - block->owner = PCIE_INVALID; block->filp = (struct drm_file *) -1; return block; @@ -101,8 +100,7 @@ struct xgi_mem_block *xgi_mem_new_node(void) struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long originalSize, - enum PcieOwner owner) + unsigned long originalSize) { struct xgi_mem_block *block, *free_block, *used_block; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; @@ -167,7 +165,6 @@ struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, heap->max_freesize -= size; list_add(&used_block->list, &heap->used_list); - used_block->owner = owner; return (used_block); } @@ -258,7 +255,7 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, ("Video RAM allocation on front buffer successfully! 
\n"); } else { down(&info->fb_sem); - block = xgi_mem_alloc(&info->fb_heap, alloc->size, PCIE_2D); + block = xgi_mem_alloc(&info->fb_heap, alloc->size); up(&info->fb_sem); if (block == NULL) { diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b91471b8..f66ffee9 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -153,31 +153,7 @@ int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct xgi_mem_block *block; down(&info->pcie_sem); - if ((alloc->owner == PCIE_3D) && (xgi_pcie_vertex_block)) { - DRM_INFO("PCIE Vertex has been created, return directly.\n"); - block = xgi_pcie_vertex_block; - } - else if ((alloc->owner == PCIE_3D_CMDLIST) && (xgi_pcie_cmdlist_block)) { - DRM_INFO("PCIE Cmdlist has been created, return directly.\n"); - block = xgi_pcie_cmdlist_block; - } - else if ((alloc->owner == PCIE_3D_SCRATCHPAD) && (xgi_pcie_scratchpad_block)) { - DRM_INFO("PCIE Scratchpad has been created, return directly.\n"); - block = xgi_pcie_scratchpad_block; - } - else { - block = xgi_mem_alloc(&info->pcie_heap, alloc->size, alloc->owner); - - if (alloc->owner == PCIE_3D) { - xgi_pcie_vertex_block = block; - } - else if (alloc->owner == PCIE_3D_CMDLIST) { - xgi_pcie_cmdlist_block = block; - } - else if (alloc->owner == PCIE_3D_SCRATCHPAD) { - xgi_pcie_scratchpad_block = block; - } - } + block = xgi_mem_alloc(&info->pcie_heap, alloc->size); up(&info->pcie_sem); if (block == NULL) { -- cgit v1.2.3 From 78e9c1a93d00097895bc77d9ac90da1945021804 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 15:37:56 -0700 Subject: Eliminate special-case handling of framebuffer (fake) allocation. 
--- linux-core/xgi_drv.h | 2 +- linux-core/xgi_fb.c | 51 +++++++++++++++++++-------------------------------- 2 files changed, 20 insertions(+), 33 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index f771517d..8a144fda 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_PATCHLEVEL 2 #include "xgi_cmdlist.h" #include "xgi_drm.h" diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index bbdebb57..b27b6b20 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -247,32 +247,23 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, { struct xgi_mem_block *block; - if (alloc->is_front) { - alloc->location = XGI_MEMLOC_LOCAL; - alloc->offset = 0; - alloc->hw_addr = 0; - DRM_INFO - ("Video RAM allocation on front buffer successfully! \n"); + down(&info->fb_sem); + block = xgi_mem_alloc(&info->fb_heap, alloc->size); + up(&info->fb_sem); + + if (block == NULL) { + alloc->size = 0; + DRM_ERROR("Video RAM allocation failed\n"); + return -ENOMEM; } else { - down(&info->fb_sem); - block = xgi_mem_alloc(&info->fb_heap, alloc->size); - up(&info->fb_sem); - - if (block == NULL) { - alloc->location = XGI_MEMLOC_LOCAL; - alloc->size = 0; - DRM_ERROR("Video RAM allocation failed\n"); - return -ENOMEM; - } else { - DRM_INFO("Video RAM allocation succeeded: 0x%p\n", - (char *)block->offset); - alloc->location = XGI_MEMLOC_LOCAL; - alloc->size = block->size; - alloc->offset = block->offset; - alloc->hw_addr = block->offset; - - block->filp = filp; - } + DRM_INFO("Video RAM allocation succeeded: 0x%p\n", + (char *)block->offset); + alloc->location = XGI_MEMLOC_LOCAL; + alloc->size = block->size; + alloc->offset = block->offset; + alloc->hw_addr = block->offset; + + block->filp = filp; } return 0; @@ -295,13 +286,9 @@ int xgi_fb_free(struct xgi_info * info, unsigned long offset, { int err = 0; - 
if (offset == 0) { - DRM_INFO("free onscreen frame buffer successfully !\n"); - } else { - down(&info->fb_sem); - err = xgi_mem_free(&info->fb_heap, offset, filp); - up(&info->fb_sem); - } + down(&info->fb_sem); + err = xgi_mem_free(&info->fb_heap, offset, filp); + up(&info->fb_sem); return err; } -- cgit v1.2.3 From a6fb93a150f90ada9af6760b52d34716497f744f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 15:43:51 -0700 Subject: Finish removing allocation "owner" infrastructure. --- linux-core/xgi_pcie.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index f66ffee9..be6915e8 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -28,10 +28,6 @@ #include "xgi_regs.h" #include "xgi_misc.h" -static struct xgi_mem_block *xgi_pcie_vertex_block = NULL; -static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL; -static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL; - static int xgi_pcie_free_locked(struct xgi_info * info, unsigned long offset, struct drm_file * filp); @@ -220,14 +216,7 @@ void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) int xgi_pcie_free_locked(struct xgi_info * info, unsigned long offset, struct drm_file * filp) { - const bool isvertex = (xgi_pcie_vertex_block - && (xgi_pcie_vertex_block->offset == offset)); - int err = xgi_mem_free(&info->pcie_heap, offset, filp); - - if (!err && isvertex) - xgi_pcie_vertex_block = NULL; - - return err; + return xgi_mem_free(&info->pcie_heap, offset, filp); } -- cgit v1.2.3 From d749cc9ae8c50157a1588369222a591410002c26 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 6 Aug 2007 15:45:37 -0700 Subject: Initialize the AGP structure's base address at init rather than enable. Not all drivers call enable (intel), but they would still like to use this member in driver code. 
--- linux-core/drm_agpsupport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index df54360d..4618823c 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -183,7 +183,6 @@ int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) #else agp_enable(dev->agp->bridge, mode.mode); #endif - dev->agp->base = dev->agp->agp_info.aper_base; dev->agp->enabled = 1; return 0; } @@ -441,6 +440,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev) INIT_LIST_HEAD(&head->memory); head->cant_use_aperture = head->agp_info.cant_use_aperture; head->page_mask = head->agp_info.page_mask; + head->base = head->agp_info.aper_base; return head; } -- cgit v1.2.3 From 5362cc723e6605c31d152eb22ee3dc40c9e3f56b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 15:52:06 -0700 Subject: Eliminate unnecessary function xgi_pcie_free_locked. --- linux-core/xgi_pcie.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index be6915e8..df49615a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -28,9 +28,6 @@ #include "xgi_regs.h" #include "xgi_misc.h" -static int xgi_pcie_free_locked(struct xgi_info * info, - unsigned long offset, struct drm_file * filp); - static int xgi_pcie_lut_init(struct xgi_info * info) { u8 temp = 0; @@ -206,27 +203,20 @@ void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) break; } - (void) xgi_pcie_free_locked(info, block->offset, filp); + (void) xgi_mem_free(&info->pcie_heap, block->offset, filp); } while(1); up(&info->pcie_sem); } -int xgi_pcie_free_locked(struct xgi_info * info, unsigned long offset, - struct drm_file * filp) -{ - return xgi_mem_free(&info->pcie_heap, offset, filp); -} - - int xgi_pcie_free(struct xgi_info * info, unsigned long offset, struct drm_file * filp) { 
int err; down(&info->pcie_sem); - err = xgi_pcie_free_locked(info, offset, filp); + err = xgi_mem_free(&info->pcie_heap, offset, filp); up(&info->pcie_sem); if (err) { -- cgit v1.2.3 From f96bff9e213a950ab910832908d30e732435e628 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 16:09:05 -0700 Subject: Unify infrastructure for allocating (not yet freeing) on-card / GART memory. --- linux-core/xgi_cmdlist.c | 3 ++- linux-core/xgi_drv.h | 9 ++------- linux-core/xgi_fb.c | 18 ++++++++++++------ linux-core/xgi_pcie.c | 31 ++----------------------------- 4 files changed, 18 insertions(+), 43 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index e0ca31f1..33155827 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -37,11 +37,12 @@ static void xgi_cmdlist_reset(struct xgi_info * info); int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) { struct xgi_mem_alloc mem_alloc = { + .location = XGI_MEMLOC_NON_LOCAL, .size = size, }; int err; - err = xgi_pcie_alloc(info, &mem_alloc, 0); + err = xgi_alloc(info, &mem_alloc, 0); if (err) { return err; } diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 8a144fda..48c4b42c 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 2 +#define DRIVER_PATCHLEVEL 3 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -92,8 +92,6 @@ struct xgi_info { }; extern struct kmem_cache *xgi_mem_block_cache; -extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long size); extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, @@ -102,7 +100,7 @@ extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); extern int xgi_fb_heap_init(struct xgi_info * info); -extern int xgi_fb_alloc(struct xgi_info 
* info, struct xgi_mem_alloc * alloc, +extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct drm_file * filp); extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, @@ -111,9 +109,6 @@ extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, extern int xgi_pcie_heap_init(struct xgi_info * info); extern void xgi_pcie_lut_cleanup(struct xgi_info * info); -extern int xgi_pcie_alloc(struct xgi_info * info, - struct xgi_mem_alloc * alloc, struct drm_file * filp); - extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, struct drm_file * filp); diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index b27b6b20..1d5dc22b 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -99,8 +99,8 @@ struct xgi_mem_block *xgi_mem_new_node(void) } -struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long originalSize) +static struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, + unsigned long originalSize) { struct xgi_mem_block *block, *free_block, *used_block; unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; @@ -242,13 +242,15 @@ int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, } -int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, +int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct drm_file * filp) { struct xgi_mem_block *block; down(&info->fb_sem); - block = xgi_mem_alloc(&info->fb_heap, alloc->size); + block = xgi_mem_alloc((alloc->location == XGI_MEMLOC_LOCAL) + ? 
&info->fb_heap : &info->pcie_heap, + alloc->size); up(&info->fb_sem); if (block == NULL) { @@ -258,11 +260,14 @@ int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, } else { DRM_INFO("Video RAM allocation succeeded: 0x%p\n", (char *)block->offset); - alloc->location = XGI_MEMLOC_LOCAL; alloc->size = block->size; alloc->offset = block->offset; alloc->hw_addr = block->offset; + if (alloc->location == XGI_MEMLOC_NON_LOCAL) { + alloc->hw_addr += info->pcie.base; + } + block->filp = filp; } @@ -277,7 +282,8 @@ int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - return xgi_fb_alloc(info, alloc, filp); + alloc->location = XGI_MEMLOC_LOCAL; + return xgi_alloc(info, alloc, filp); } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index df49615a..c0d424f5 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -140,34 +140,6 @@ int xgi_pcie_heap_init(struct xgi_info * info) } -int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - struct drm_file * filp) -{ - struct xgi_mem_block *block; - - down(&info->pcie_sem); - block = xgi_mem_alloc(&info->pcie_heap, alloc->size); - up(&info->pcie_sem); - - if (block == NULL) { - alloc->location = XGI_MEMLOC_INVALID; - alloc->size = 0; - DRM_ERROR("PCIE RAM allocation failed\n"); - return -ENOMEM; - } else { - DRM_INFO("PCIE RAM allocation succeeded: offset = 0x%lx\n", - block->offset); - alloc->location = XGI_MEMLOC_NON_LOCAL; - alloc->size = block->size; - alloc->hw_addr = block->offset + info->pcie.base; - alloc->offset = block->offset; - - block->filp = filp; - return 0; - } -} - - int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp) { @@ -175,7 +147,8 @@ int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - return xgi_pcie_alloc(info, alloc, filp); + alloc->location = 
XGI_MEMLOC_NON_LOCAL; + return xgi_alloc(info, alloc, filp); } -- cgit v1.2.3 From 90907c59152f628d6f0efea4927a06e547f4a3c7 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 16:17:23 -0700 Subject: Replace per-heap semaphores with drm_device::struct_mutex. --- linux-core/xgi_drv.c | 3 --- linux-core/xgi_drv.h | 3 --- linux-core/xgi_fb.c | 12 ++++++------ linux-core/xgi_pcie.c | 8 ++++---- 4 files changed, 10 insertions(+), 16 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 33b3a51d..e98fd608 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -360,9 +360,6 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) dev->dev_private = info; info->dev = dev; - sema_init(&info->fb_sem, 1); - sema_init(&info->pcie_sem, 1); - info->mmio.base = drm_get_resource_start(dev, 1); info->mmio.size = drm_get_resource_len(dev, 1); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 48c4b42c..384381c7 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -85,9 +85,6 @@ struct xgi_info { struct xgi_mem_heap fb_heap; struct xgi_mem_heap pcie_heap; - struct semaphore fb_sem; - struct semaphore pcie_sem; - struct xgi_cmdring_info cmdring; }; diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 1d5dc22b..373c45dd 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -247,11 +247,11 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, { struct xgi_mem_block *block; - down(&info->fb_sem); + mutex_lock(&info->dev->struct_mutex); block = xgi_mem_alloc((alloc->location == XGI_MEMLOC_LOCAL) ? 
&info->fb_heap : &info->pcie_heap, alloc->size); - up(&info->fb_sem); + mutex_unlock(&info->dev->struct_mutex); if (block == NULL) { alloc->size = 0; @@ -292,9 +292,9 @@ int xgi_fb_free(struct xgi_info * info, unsigned long offset, { int err = 0; - down(&info->fb_sem); + mutex_lock(&info->dev->struct_mutex); err = xgi_mem_free(&info->fb_heap, offset, filp); - up(&info->fb_sem); + mutex_unlock(&info->dev->struct_mutex); return err; } @@ -324,7 +324,7 @@ void xgi_fb_free_all(struct xgi_info * info, struct drm_file * filp) return; } - down(&info->fb_sem); + mutex_lock(&info->dev->struct_mutex); do { struct xgi_mem_block *block; @@ -342,5 +342,5 @@ void xgi_fb_free_all(struct xgi_info * info, struct drm_file * filp) (void) xgi_mem_free(&info->fb_heap, block->offset, filp); } while(1); - up(&info->fb_sem); + mutex_unlock(&info->dev->struct_mutex); } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index c0d424f5..883fbe7e 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -161,7 +161,7 @@ void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) return; } - down(&info->pcie_sem); + mutex_lock(&info->dev->struct_mutex); do { struct xgi_mem_block *block; @@ -179,7 +179,7 @@ void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) (void) xgi_mem_free(&info->pcie_heap, block->offset, filp); } while(1); - up(&info->pcie_sem); + mutex_unlock(&info->dev->struct_mutex); } @@ -188,9 +188,9 @@ int xgi_pcie_free(struct xgi_info * info, unsigned long offset, { int err; - down(&info->pcie_sem); + mutex_lock(&info->dev->struct_mutex); err = xgi_mem_free(&info->pcie_heap, offset, filp); - up(&info->pcie_sem); + mutex_unlock(&info->dev->struct_mutex); if (err) { DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); -- cgit v1.2.3 From f3072becda3a2d5fe587f20e155d4d4f9ace60a2 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 16:35:07 -0700 Subject: Refactor xgi_(fb|pcie)_free_all into xgi_free_all. 
--- linux-core/xgi_drv.c | 6 ++++-- linux-core/xgi_drv.h | 6 +++--- linux-core/xgi_fb.c | 14 ++++++-------- linux-core/xgi_pcie.c | 31 ------------------------------- 4 files changed, 13 insertions(+), 44 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index e98fd608..f6e7b550 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -284,8 +284,10 @@ void xgi_driver_preclose(struct drm_device * dev, struct drm_file * filp) { struct xgi_info * info = dev->dev_private; - xgi_pcie_free_all(info, filp); - xgi_fb_free_all(info, filp); + mutex_lock(&info->dev->struct_mutex); + xgi_free_all(info, &info->pcie_heap, filp); + xgi_free_all(info, &info->fb_heap, filp); + mutex_unlock(&info->dev->struct_mutex); } diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 384381c7..79276b70 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 3 +#define DRIVER_PATCHLEVEL 4 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -111,8 +111,8 @@ extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); -extern void xgi_pcie_free_all(struct xgi_info *, struct drm_file *); -extern void xgi_fb_free_all(struct xgi_info *, struct drm_file *); +extern void xgi_free_all(struct xgi_info *, struct xgi_mem_heap *, + struct drm_file *); extern int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 373c45dd..f8341a67 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -318,29 +318,27 @@ int xgi_fb_heap_init(struct xgi_info * info) /** * Free all blocks associated with a particular file handle. 
*/ -void xgi_fb_free_all(struct xgi_info * info, struct drm_file * filp) +void xgi_free_all(struct xgi_info * info, struct xgi_mem_heap * heap, + struct drm_file * filp) { - if (!info->fb_heap.initialized) { + if (!heap->initialized) { return; } - mutex_lock(&info->dev->struct_mutex); do { struct xgi_mem_block *block; - list_for_each_entry(block, &info->fb_heap.used_list, list) { + list_for_each_entry(block, &heap->used_list, list) { if (block->filp == filp) { break; } } - if (&block->list == &info->fb_heap.used_list) { + if (&block->list == &heap->used_list) { break; } - (void) xgi_mem_free(&info->fb_heap, block->offset, filp); + (void) xgi_mem_free(heap, block->offset, filp); } while(1); - - mutex_unlock(&info->dev->struct_mutex); } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 883fbe7e..b2edf3b1 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -152,37 +152,6 @@ int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, } -/** - * Free all blocks associated with a particular file handle. - */ -void xgi_pcie_free_all(struct xgi_info * info, struct drm_file * filp) -{ - if (!info->pcie_heap.initialized) { - return; - } - - mutex_lock(&info->dev->struct_mutex); - - do { - struct xgi_mem_block *block; - - list_for_each_entry(block, &info->pcie_heap.used_list, list) { - if (block->filp == filp) { - break; - } - } - - if (&block->list == &info->pcie_heap.used_list) { - break; - } - - (void) xgi_mem_free(&info->pcie_heap, block->offset, filp); - } while(1); - - mutex_unlock(&info->dev->struct_mutex); -} - - int xgi_pcie_free(struct xgi_info * info, unsigned long offset, struct drm_file * filp) { -- cgit v1.2.3 From 6718198897ef9e275506d3fcb497641e1b09d3b1 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 16:56:20 -0700 Subject: Release client memory in reclaim_buffers_idlelocked instead of preclose. 
--- linux-core/xgi_drv.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index f6e7b550..b6fea437 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -61,9 +61,9 @@ static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static int xgi_driver_load(struct drm_device *dev, unsigned long flags); static int xgi_driver_unload(struct drm_device *dev); -static void xgi_driver_preclose(struct drm_device * dev, - struct drm_file * filp); static void xgi_driver_lastclose(struct drm_device * dev); +static void xgi_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file * filp); static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); @@ -74,7 +74,6 @@ static struct drm_driver driver = { .dev_priv_size = sizeof(struct xgi_info), .load = xgi_driver_load, .unload = xgi_driver_unload, - .preclose = xgi_driver_preclose, .lastclose = xgi_driver_lastclose, .dma_quiescent = NULL, .irq_preinstall = NULL, @@ -82,6 +81,7 @@ static struct drm_driver driver = { .irq_uninstall = NULL, .irq_handler = xgi_kern_isr, .reclaim_buffers = drm_core_reclaim_buffers, + .reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked, .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, .ioctls = xgi_ioctls, @@ -280,17 +280,6 @@ int xgi_bootstrap(struct drm_device * dev, void * data, } -void xgi_driver_preclose(struct drm_device * dev, struct drm_file * filp) -{ - struct xgi_info * info = dev->dev_private; - - mutex_lock(&info->dev->struct_mutex); - xgi_free_all(info, &info->pcie_heap, filp); - xgi_free_all(info, &info->fb_heap, filp); - mutex_unlock(&info->dev->struct_mutex); -} - - void xgi_driver_lastclose(struct drm_device * dev) { struct xgi_info * info = dev->dev_private; @@ -322,6 +311,23 @@ void xgi_driver_lastclose(struct drm_device * dev) } +void 
xgi_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file * filp) +{ + struct xgi_info * info = dev->dev_private; + + mutex_lock(&info->dev->struct_mutex); + if (dev->driver->dma_quiescent) { + dev->driver->dma_quiescent(dev); + } + + xgi_free_all(info, &info->pcie_heap, filp); + xgi_free_all(info, &info->fb_heap, filp); + mutex_unlock(&info->dev->struct_mutex); + return; +} + + /* * driver receives an interrupt if someone waiting, then hand it off. */ -- cgit v1.2.3 From f7ba02b7458823627097a2320bf9befa84fc9c76 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 6 Aug 2007 17:27:15 -0700 Subject: Unify infrastructure for freeing on-card / GART memory. --- linux-core/xgi_cmdlist.c | 4 +++- linux-core/xgi_drv.h | 9 ++------- linux-core/xgi_fb.c | 15 +++++++++------ linux-core/xgi_pcie.c | 19 +------------------ 4 files changed, 15 insertions(+), 32 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 33155827..e1653021 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -202,7 +202,9 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) xgi_waitfor_pci_idle(info); } - xgi_pcie_free(info, info->cmdring.ring_gart_base, NULL); + xgi_free(info, (XGI_MEMLOC_NON_LOCAL + | info->cmdring.ring_gart_base), + NULL); info->cmdring.ring_hw_base = 0; info->cmdring.ring_offset = 0; info->cmdring.size = 0; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 79276b70..a7740ceb 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 4 +#define DRIVER_PATCHLEVEL 5 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -89,8 +89,6 @@ struct xgi_info { }; extern struct kmem_cache *xgi_mem_block_cache; -extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, unsigned int end); 
extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); @@ -100,15 +98,12 @@ extern int xgi_fb_heap_init(struct xgi_info * info); extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct drm_file * filp); -extern int xgi_fb_free(struct xgi_info * info, unsigned long offset, +extern int xgi_free(struct xgi_info * info, unsigned long index, struct drm_file * filp); extern int xgi_pcie_heap_init(struct xgi_info * info); extern void xgi_pcie_lut_cleanup(struct xgi_info * info); -extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset, - struct drm_file * filp); - extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); extern void xgi_free_all(struct xgi_info *, struct xgi_mem_heap *, diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index f8341a67..26e6fc4e 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -169,8 +169,8 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, return (used_block); } -int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp) +static int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + struct drm_file * filp) { struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; @@ -287,13 +287,16 @@ int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, } -int xgi_fb_free(struct xgi_info * info, unsigned long offset, - struct drm_file * filp) +int xgi_free(struct xgi_info * info, unsigned long index, + struct drm_file * filp) { int err = 0; + const unsigned heap = index & 0x03; mutex_lock(&info->dev->struct_mutex); - err = xgi_mem_free(&info->fb_heap, offset, filp); + err = xgi_mem_free((heap == XGI_MEMLOC_NON_LOCAL) + ? 
&info->pcie_heap : &info->fb_heap, + (index & ~0x03), filp); mutex_unlock(&info->dev->struct_mutex); return err; @@ -305,7 +308,7 @@ int xgi_fb_free_ioctl(struct drm_device * dev, void * data, { struct xgi_info *info = dev->dev_private; - return xgi_fb_free(info, *(u32 *) data, filp); + return xgi_free(info, XGI_MEMLOC_LOCAL | *(u32 *) data, filp); } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b2edf3b1..281223b3 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -152,29 +152,12 @@ int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, } -int xgi_pcie_free(struct xgi_info * info, unsigned long offset, - struct drm_file * filp) -{ - int err; - - mutex_lock(&info->dev->struct_mutex); - err = xgi_mem_free(&info->pcie_heap, offset, filp); - mutex_unlock(&info->dev->struct_mutex); - - if (err) { - DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset); - } - - return err; -} - - int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, struct drm_file * filp) { struct xgi_info *info = dev->dev_private; - return xgi_pcie_free(info, *(u32 *) data, filp); + return xgi_free(info, XGI_MEMLOC_NON_LOCAL | *(u32 *) data, filp); } -- cgit v1.2.3 From 25cb876f8513d02d4d189371eaa8b7b9a88e860d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 15:23:13 -0700 Subject: Associate master file pointer with command list buffer. Pass the master's file pointer, as supplied to xgi_bootstrap, to xgi_cmdlist_initialize. Associate that pointer with the memory allocated for the command list buffer. By doing this the memory will be automatically cleaned up when the master closes the device. This allows the removal of some clean up code. 
--- linux-core/xgi_cmdlist.c | 13 ++++--------- linux-core/xgi_cmdlist.h | 9 ++------- linux-core/xgi_drv.c | 2 +- linux-core/xgi_drv.h | 4 ++-- 4 files changed, 9 insertions(+), 19 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index e1653021..a1ec5720 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -34,7 +34,8 @@ static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(struct xgi_info * info); -int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) +int xgi_cmdlist_initialize(struct xgi_info * info, size_t size, + struct drm_file * filp) { struct xgi_mem_alloc mem_alloc = { .location = XGI_MEMLOC_NON_LOCAL, @@ -42,7 +43,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) }; int err; - err = xgi_alloc(info, &mem_alloc, 0); + err = xgi_alloc(info, &mem_alloc, filp); if (err) { return err; } @@ -50,7 +51,6 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size) info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr); info->cmdring.size = mem_alloc.size; info->cmdring.ring_hw_base = mem_alloc.hw_addr; - info->cmdring.ring_gart_base = mem_alloc.offset; info->cmdring.last_ptr = NULL; info->cmdring.ring_offset = 0; @@ -202,12 +202,7 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) xgi_waitfor_pci_idle(info); } - xgi_free(info, (XGI_MEMLOC_NON_LOCAL - | info->cmdring.ring_gart_base), - NULL); - info->cmdring.ring_hw_base = 0; - info->cmdring.ring_offset = 0; - info->cmdring.size = 0; + (void) memset(&info->cmdring, 0, sizeof(info->cmdring)); } } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 604c9aac..07a2eb98 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -72,12 +72,6 @@ struct xgi_cmdring_info { */ unsigned int ring_hw_base; - /** - * Offset, in bytes, from the base of PCI-e GART space to the start - 
* of the ring. - */ - unsigned long ring_gart_base; - u32 * last_ptr; /** @@ -88,7 +82,8 @@ struct xgi_cmdring_info { }; struct xgi_info; -extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size); +extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size, + struct drm_file * filp); extern int xgi_state_change(struct xgi_info * info, unsigned int to, unsigned int from); diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index b6fea437..9aaeb467 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -248,7 +248,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, } /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ - err = xgi_cmdlist_initialize(info, 0x100000); + err = xgi_cmdlist_initialize(info, 0x100000, filp); if (err) { DRM_ERROR("xgi_cmdlist_initialize() failed\n"); return err; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index a7740ceb..9c0f5a28 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -34,11 +34,11 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070806" +#define DRIVER_DATE "20070809" #define DRIVER_MAJOR 0 #define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 5 +#define DRIVER_PATCHLEVEL 6 #include "xgi_cmdlist.h" #include "xgi_drm.h" -- cgit v1.2.3 From aea6b4dea9708f66f5fc2068fe84407682570aca Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 15:30:36 -0700 Subject: Unify alloc and free ioctls. The DRM_XGI_PCIE_ALLOC and DRM_XGI_FB_ALLOC ioctls (and the matching free ioctls) are unified to DRM_XGI_ALLOC. The desired memory region is selected by xgi_mem_alloc::location. The region is magically encoded in xgi_mem_alloc::index, which is used to release the memory. Bump to version 0.11.0. This update requires a new DDX. 
--- linux-core/xgi_drv.c | 7 ++---- linux-core/xgi_drv.h | 14 +++++------ linux-core/xgi_fb.c | 65 ++++++++++++++++++++++++++++++++------------------- linux-core/xgi_pcie.c | 21 ----------------- 4 files changed, 49 insertions(+), 58 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 9aaeb467..997051fb 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -42,11 +42,8 @@ static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); static struct drm_ioctl_desc xgi_ioctls[] = { DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_XGI_FB_ALLOC, xgi_fb_alloc_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_FB_FREE, xgi_fb_free_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_XGI_PCIE_ALLOC, xgi_pcie_alloc_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_PCIE_FREE, xgi_pcie_free_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_GE_RESET, xgi_ge_reset_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_DUMP_REGISTER, xgi_dump_register_ioctl, DRM_AUTH), diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 9c0f5a28..e56d00bb 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -37,8 +37,8 @@ #define DRIVER_DATE "20070809" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 10 -#define DRIVER_PATCHLEVEL 6 +#define DRIVER_MINOR 11 +#define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -89,6 +89,8 @@ struct xgi_info { }; extern struct kmem_cache *xgi_mem_block_cache; +extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + struct drm_file * filp); extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, unsigned int end); extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); @@ -109,13 +111,9 @@ extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); extern void xgi_free_all(struct 
xgi_info *, struct xgi_mem_heap *, struct drm_file *); -extern int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); -extern int xgi_fb_free_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); -extern int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, +extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); -extern int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, +extern int xgi_free_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); extern int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 26e6fc4e..d0182831 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -169,8 +169,8 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, return (used_block); } -static int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp) +int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, + struct drm_file * filp) { struct xgi_mem_block *used_block = NULL, *block; struct xgi_mem_block *prev, *next; @@ -243,26 +243,45 @@ static int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, - struct drm_file * filp) + struct drm_file * filp) { struct xgi_mem_block *block; + struct xgi_mem_heap *heap; + const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL) + ? "on-card" : "GART"; + + + if ((alloc->location != XGI_MEMLOC_LOCAL) + && (alloc->location != XGI_MEMLOC_NON_LOCAL)) { + DRM_ERROR("Invalid memory pool (0x%08x) specified.\n", + alloc->location); + return -EINVAL; + } + + heap = (alloc->location == XGI_MEMLOC_LOCAL) + ? 
&info->fb_heap : &info->pcie_heap; + + if (!heap->initialized) { + DRM_ERROR("Attempt to allocate from uninitialized memory " + "pool (0x%08x).\n", alloc->location); + return -EINVAL; + } mutex_lock(&info->dev->struct_mutex); - block = xgi_mem_alloc((alloc->location == XGI_MEMLOC_LOCAL) - ? &info->fb_heap : &info->pcie_heap, - alloc->size); + block = xgi_mem_alloc(heap, alloc->size); mutex_unlock(&info->dev->struct_mutex); if (block == NULL) { alloc->size = 0; - DRM_ERROR("Video RAM allocation failed\n"); + DRM_ERROR("%s memory allocation failed\n", mem_name); return -ENOMEM; } else { - DRM_INFO("Video RAM allocation succeeded: 0x%p\n", - (char *)block->offset); + DRM_DEBUG("%s memory allocation succeeded: 0x%p\n", + mem_name, (char *)block->offset); alloc->size = block->size; alloc->offset = block->offset; alloc->hw_addr = block->offset; + alloc->index = alloc->offset | alloc->location; if (alloc->location == XGI_MEMLOC_NON_LOCAL) { alloc->hw_addr += info->pcie.base; @@ -275,47 +294,45 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, } -int xgi_fb_alloc_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) +int xgi_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { - struct xgi_mem_alloc *alloc = - (struct xgi_mem_alloc *) data; struct xgi_info *info = dev->dev_private; - alloc->location = XGI_MEMLOC_LOCAL; - return xgi_alloc(info, alloc, filp); + return xgi_alloc(info, (struct xgi_mem_alloc *) data, filp); } int xgi_free(struct xgi_info * info, unsigned long index, struct drm_file * filp) { - int err = 0; - const unsigned heap = index & 0x03; + int err; + struct xgi_mem_heap *const heap = + ((index & 0x03) == XGI_MEMLOC_NON_LOCAL) + ? &info->pcie_heap : &info->fb_heap; + const u32 offset = (index & ~0x03); mutex_lock(&info->dev->struct_mutex); - err = xgi_mem_free((heap == XGI_MEMLOC_NON_LOCAL) - ? 
&info->pcie_heap : &info->fb_heap, - (index & ~0x03), filp); + err = xgi_mem_free(heap, offset, filp); mutex_unlock(&info->dev->struct_mutex); return err; } -int xgi_fb_free_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) +int xgi_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) { struct xgi_info *info = dev->dev_private; - return xgi_free(info, XGI_MEMLOC_LOCAL | *(u32 *) data, filp); + return xgi_free(info, *(unsigned long *) data, filp); } int xgi_fb_heap_init(struct xgi_info * info) { return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START, - info->fb.size); + info->fb.size - XGI_FB_HEAP_START); } /** diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 281223b3..4ec9b6ff 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -140,27 +140,6 @@ int xgi_pcie_heap_init(struct xgi_info * info) } -int xgi_pcie_alloc_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_mem_alloc *const alloc = - (struct xgi_mem_alloc *) data; - struct xgi_info *info = dev->dev_private; - - alloc->location = XGI_MEMLOC_NON_LOCAL; - return xgi_alloc(info, alloc, filp); -} - - -int xgi_pcie_free_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - - return xgi_free(info, XGI_MEMLOC_NON_LOCAL | *(u32 *) data, filp); -} - - /** * xgi_find_pcie_virt * @address: GE HW address -- cgit v1.2.3 From dbd4d0597ff32458bbe4347bdea0e4b9e55a14da Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 16:01:14 -0700 Subject: Use sman memory manager instead of internal version. 
--- linux-core/xgi_drv.c | 46 +++++---- linux-core/xgi_drv.h | 34 +----- linux-core/xgi_fb.c | 281 ++++---------------------------------------------- linux-core/xgi_pcie.c | 6 +- 4 files changed, 54 insertions(+), 313 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 997051fb..b0f84c81 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -225,7 +225,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, /* Init the resource manager */ - if (!info->fb_heap.initialized) { + if (!info->fb_heap_initialized) { err = xgi_fb_heap_init(info); if (err) { DRM_ERROR("Unable to initialize FB heap.\n"); @@ -237,7 +237,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, info->pcie.size = bs->gart.size; /* Init the resource manager */ - if (!info->pcie_heap.initialized) { + if (!info->pcie_heap_initialized) { err = xgi_pcie_heap_init(info); if (err) { DRM_ERROR("Unable to initialize GART heap.\n"); @@ -296,13 +296,13 @@ void xgi_driver_lastclose(struct drm_device * dev) info->mmio_map = NULL; info->fb_map = NULL; - if (info->fb_heap.initialized) { - xgi_mem_heap_cleanup(&info->fb_heap); + if (info->pcie_heap_initialized) { + xgi_pcie_lut_cleanup(info); } - if (info->pcie_heap.initialized) { - xgi_mem_heap_cleanup(&info->pcie_heap); - xgi_pcie_lut_cleanup(info); + if (info->fb_heap_initialized + || info->pcie_heap_initialized) { + drm_sman_cleanup(&info->sman); } } } @@ -314,12 +314,16 @@ void xgi_reclaim_buffers_locked(struct drm_device * dev, struct xgi_info * info = dev->dev_private; mutex_lock(&info->dev->struct_mutex); + if (drm_sman_owner_clean(&info->sman, (unsigned long) filp)) { + mutex_unlock(&info->dev->struct_mutex); + return; + } + if (dev->driver->dma_quiescent) { dev->driver->dma_quiescent(dev); } - xgi_free_all(info, &info->pcie_heap, filp); - xgi_free_all(info, &info->fb_heap, filp); + drm_sman_owner_cleanup(&info->sman, (unsigned long) filp); mutex_unlock(&info->dev->struct_mutex); 
return; } @@ -357,6 +361,7 @@ void xgi_kern_isr_bh(struct drm_device *dev) int xgi_driver_load(struct drm_device *dev, unsigned long flags) { struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); + int err; if (!info) return -ENOMEM; @@ -375,7 +380,8 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) if ((info->mmio.base == 0) || (info->mmio.size == 0)) { DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n", (unsigned long) info->mmio.base, info->mmio.size); - return -EINVAL; + err = -EINVAL; + goto fail; } @@ -386,28 +392,24 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags) (unsigned long) info->fb.base, info->fb.size); - xgi_mem_block_cache = kmem_cache_create("xgi_mem_block", - sizeof(struct xgi_mem_block), - 0, - SLAB_HWCACHE_ALIGN, - NULL, NULL); - if (xgi_mem_block_cache == NULL) { - return -ENOMEM; + err = drm_sman_init(&info->sman, 2, 12, 8); + if (err) { + goto fail; } return 0; + +fail: + drm_free(info, sizeof(*info), DRM_MEM_DRIVER); + return err; } int xgi_driver_unload(struct drm_device *dev) { struct xgi_info * info = dev->dev_private; - if (xgi_mem_block_cache) { - kmem_cache_destroy(xgi_mem_block_cache); - xgi_mem_block_cache = NULL; - } - + drm_sman_takedown(&info->sman); drm_free(info, sizeof(*info), DRM_MEM_DRIVER); dev->dev_private = NULL; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index e56d00bb..6b209aa4 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -29,6 +29,7 @@ #include "drmP.h" #include "drm.h" +#include "drm_sman.h" #define DRIVER_AUTHOR "Andrea Zhang " @@ -38,7 +39,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 11 -#define DRIVER_PATCHLEVEL 0 +#define DRIVER_PATCHLEVEL 1 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -48,22 +49,6 @@ struct xgi_aperture { unsigned int size; }; -struct xgi_mem_block { - struct list_head list; - unsigned long offset; - unsigned long size; - struct drm_file * filp; -}; - -struct xgi_mem_heap { - struct list_head free_list; - 
struct list_head used_list; - struct list_head sort_list; - unsigned long max_freesize; - - bool initialized; -}; - struct xgi_info { struct drm_device *dev; @@ -82,19 +67,13 @@ struct xgi_info { struct drm_dma_handle *lut_handle; unsigned int lutPageSize; - struct xgi_mem_heap fb_heap; - struct xgi_mem_heap pcie_heap; + struct drm_sman sman; + bool fb_heap_initialized; + bool pcie_heap_initialized; struct xgi_cmdring_info cmdring; }; -extern struct kmem_cache *xgi_mem_block_cache; -extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp); -extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start, - unsigned int end); -extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap); - extern int xgi_fb_heap_init(struct xgi_info * info); extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, @@ -108,9 +87,6 @@ extern void xgi_pcie_lut_cleanup(struct xgi_info * info); extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); -extern void xgi_free_all(struct xgi_info *, struct xgi_mem_heap *, - struct drm_file *); - extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); extern int xgi_free_ioctl(struct drm_device * dev, void * data, diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index d0182831..40f39fbc 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -28,225 +28,10 @@ #define XGI_FB_HEAP_START 0x1000000 -struct kmem_cache *xgi_mem_block_cache = NULL; - -static struct xgi_mem_block *xgi_mem_new_node(void); - - -int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start, - unsigned int end) -{ - struct xgi_mem_block *block; - - INIT_LIST_HEAD(&heap->free_list); - INIT_LIST_HEAD(&heap->used_list); - INIT_LIST_HEAD(&heap->sort_list); - heap->initialized = TRUE; - - block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); - if (!block) { - return -ENOMEM; - } - - block->offset = start; - block->size = end - start; - 
- list_add(&block->list, &heap->free_list); - - heap->max_freesize = end - start; - - return 0; -} - - -void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap) -{ - struct list_head *free_list; - struct xgi_mem_block *block; - struct xgi_mem_block *next; - int i; - - free_list = &heap->free_list; - for (i = 0; i < 3; i++, free_list++) { - list_for_each_entry_safe(block, next, free_list, list) { - DRM_INFO - ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", - i, block->offset, block->size); - kmem_cache_free(xgi_mem_block_cache, block); - block = NULL; - } - } - - heap->initialized = 0; -} - - -struct xgi_mem_block *xgi_mem_new_node(void) -{ - struct xgi_mem_block *block = - kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL); - - if (!block) { - DRM_ERROR("kmem_cache_alloc failed\n"); - return NULL; - } - - block->offset = 0; - block->size = 0; - block->filp = (struct drm_file *) -1; - - return block; -} - - -static struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap, - unsigned long originalSize) -{ - struct xgi_mem_block *block, *free_block, *used_block; - unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; - - - DRM_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", - originalSize, size); - - if (size == 0) { - DRM_ERROR("size == 0\n"); - return (NULL); - } - DRM_INFO("max_freesize: 0x%lx \n", heap->max_freesize); - if (size > heap->max_freesize) { - DRM_ERROR - ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", - size, heap->max_freesize); - return (NULL); - } - - list_for_each_entry(block, &heap->free_list, list) { - DRM_INFO("block: 0x%px \n", block); - if (size <= block->size) { - break; - } - } - - if (&block->list == &heap->free_list) { - DRM_ERROR - ("Can't allocate %ldk size from frame buffer memory !\n", - size / 1024); - return (NULL); - } - - free_block = block; - DRM_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", - size, free_block->offset, free_block->size); - - if (size == 
free_block->size) { - used_block = free_block; - DRM_INFO("size == free_block->size: free_block = 0x%p\n", - free_block); - list_del(&free_block->list); - } else { - used_block = xgi_mem_new_node(); - - if (used_block == NULL) - return (NULL); - - if (used_block == free_block) { - DRM_ERROR("used_block == free_block = 0x%p\n", - used_block); - } - - used_block->offset = free_block->offset; - used_block->size = size; - - free_block->offset += size; - free_block->size -= size; - } - - heap->max_freesize -= size; - - list_add(&used_block->list, &heap->used_list); - - return (used_block); -} - -int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset, - struct drm_file * filp) -{ - struct xgi_mem_block *used_block = NULL, *block; - struct xgi_mem_block *prev, *next; - - unsigned long upper; - unsigned long lower; - - list_for_each_entry(block, &heap->used_list, list) { - if (block->offset == offset) { - break; - } - } - - if (&block->list == &heap->used_list) { - DRM_ERROR("can't find block: 0x%lx to free!\n", offset); - return -ENOENT; - } - - if (block->filp != filp) { - return -EPERM; - } - - used_block = block; - DRM_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - - heap->max_freesize += used_block->size; - - prev = next = NULL; - upper = used_block->offset + used_block->size; - lower = used_block->offset; - - list_for_each_entry(block, &heap->free_list, list) { - if (block->offset == upper) { - next = block; - } else if ((block->offset + block->size) == lower) { - prev = block; - } - } - - DRM_INFO("next = 0x%p, prev = 0x%p\n", next, prev); - list_del(&used_block->list); - - if (prev && next) { - prev->size += (used_block->size + next->size); - list_del(&next->list); - DRM_INFO("free node 0x%p\n", next); - kmem_cache_free(xgi_mem_block_cache, next); - kmem_cache_free(xgi_mem_block_cache, used_block); - } - else if (prev) { - prev->size += used_block->size; - DRM_INFO("free node 0x%p\n", 
used_block); - kmem_cache_free(xgi_mem_block_cache, used_block); - } - else if (next) { - next->size += used_block->size; - next->offset = used_block->offset; - DRM_INFO("free node 0x%p\n", used_block); - kmem_cache_free(xgi_mem_block_cache, used_block); - } - else { - list_add(&used_block->list, &heap->free_list); - DRM_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", - used_block, used_block->offset, used_block->size); - } - - return 0; -} - - int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct drm_file * filp) { - struct xgi_mem_block *block; - struct xgi_mem_heap *heap; + struct drm_memblock_item *block; const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL) ? "on-card" : "GART"; @@ -258,17 +43,16 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, return -EINVAL; } - heap = (alloc->location == XGI_MEMLOC_LOCAL) - ? &info->fb_heap : &info->pcie_heap; - - if (!heap->initialized) { + if ((alloc->location == XGI_MEMLOC_LOCAL) + ? 
!info->fb_heap_initialized : !info->pcie_heap_initialized) { DRM_ERROR("Attempt to allocate from uninitialized memory " "pool (0x%08x).\n", alloc->location); return -EINVAL; } mutex_lock(&info->dev->struct_mutex); - block = xgi_mem_alloc(heap, alloc->size); + block = drm_sman_alloc(&info->sman, alloc->location, alloc->size, + 0, (unsigned long) filp); mutex_unlock(&info->dev->struct_mutex); if (block == NULL) { @@ -276,18 +60,17 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, DRM_ERROR("%s memory allocation failed\n", mem_name); return -ENOMEM; } else { - DRM_DEBUG("%s memory allocation succeeded: 0x%p\n", - mem_name, (char *)block->offset); - alloc->size = block->size; - alloc->offset = block->offset; - alloc->hw_addr = block->offset; - alloc->index = alloc->offset | alloc->location; + alloc->offset = (*block->mm->offset)(block->mm, + block->mm_info); + alloc->hw_addr = alloc->offset; + alloc->index = block->user_hash.key; if (alloc->location == XGI_MEMLOC_NON_LOCAL) { alloc->hw_addr += info->pcie.base; } - block->filp = filp; + DRM_DEBUG("%s memory allocation succeeded: 0x%x\n", + mem_name, alloc->offset); } return 0; @@ -307,13 +90,9 @@ int xgi_free(struct xgi_info * info, unsigned long index, struct drm_file * filp) { int err; - struct xgi_mem_heap *const heap = - ((index & 0x03) == XGI_MEMLOC_NON_LOCAL) - ? 
&info->pcie_heap : &info->fb_heap; - const u32 offset = (index & ~0x03); mutex_lock(&info->dev->struct_mutex); - err = xgi_mem_free(heap, offset, filp); + err = drm_sman_free_key(&info->sman, index); mutex_unlock(&info->dev->struct_mutex); return err; @@ -331,34 +110,14 @@ int xgi_free_ioctl(struct drm_device * dev, void * data, int xgi_fb_heap_init(struct xgi_info * info) { - return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START, + int err; + + mutex_lock(&info->dev->struct_mutex); + err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL, + XGI_FB_HEAP_START, info->fb.size - XGI_FB_HEAP_START); -} - -/** - * Free all blocks associated with a particular file handle. - */ -void xgi_free_all(struct xgi_info * info, struct xgi_mem_heap * heap, - struct drm_file * filp) -{ - if (!heap->initialized) { - return; - } - - - do { - struct xgi_mem_block *block; - - list_for_each_entry(block, &heap->used_list, list) { - if (block->filp == filp) { - break; - } - } - - if (&block->list == &heap->used_list) { - break; - } + mutex_unlock(&info->dev->struct_mutex); - (void) xgi_mem_free(heap, block->offset, filp); - } while(1); + info->fb_heap_initialized = (err == 0); + return err; } diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 4ec9b6ff..932615a4 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -131,11 +131,15 @@ int xgi_pcie_heap_init(struct xgi_info * info) } - err = xgi_mem_heap_init(&info->pcie_heap, 0, info->pcie.size); + mutex_lock(&info->dev->struct_mutex); + err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL, + 0, info->pcie.size); + mutex_unlock(&info->dev->struct_mutex); if (err) { xgi_pcie_lut_cleanup(info); } + info->pcie_heap_initialized = (err == 0); return err; } -- cgit v1.2.3 From 6dd97099ea5c6dc7931c6b482eb5935f7dd9ed2d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 16:20:44 -0700 Subject: Minor clean up of IRQ code. Much, much more to come. 
--- linux-core/xgi_drv.c | 27 ++++++++------------------- linux-core/xgi_drv.h | 2 +- 2 files changed, 9 insertions(+), 20 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index b0f84c81..d0878184 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -186,8 +186,6 @@ void xgi_engine_init(struct xgi_info * info) } -void xgi_kern_isr_bh(struct drm_device *dev); - int xgi_bootstrap(struct drm_device * dev, void * data, struct drm_file * filp) { @@ -335,28 +333,19 @@ void xgi_reclaim_buffers_locked(struct drm_device * dev, irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; -// struct xgi_info *info = dev->dev_private; - u32 need_to_run_bottom_half = 0; - - //DRM_INFO("xgi_kern_isr \n"); + struct xgi_info *info = dev->dev_private; + const u32 irq_bits = DRM_READ32(info->mmio_map, 0x2810); - //xgi_dvi_irq_handler(info); - if (need_to_run_bottom_half) { - drm_locked_tasklet(dev, xgi_kern_isr_bh); + if ((irq_bits & 0x00000000) != 0) { + DRM_WRITE32(info->mmio_map, 0x2810, + 0x04000000 | irq_bits); + return IRQ_HANDLED; + } else { + return IRQ_NONE; } - - return IRQ_HANDLED; } -void xgi_kern_isr_bh(struct drm_device *dev) -{ - struct xgi_info *info = dev->dev_private; - - DRM_INFO("xgi_kern_isr_bh \n"); - - //xgi_dvi_irq_handler(info); -} int xgi_driver_load(struct drm_device *dev, unsigned long flags) { diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 6b209aa4..6afc4c6c 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -39,7 +39,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 11 -#define DRIVER_PATCHLEVEL 1 +#define DRIVER_PATCHLEVEL 2 #include "xgi_cmdlist.h" #include "xgi_drm.h" -- cgit v1.2.3 From 371f0a4d410f02d8db050b51fd2e714f888a71e0 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 18:15:42 -0700 Subject: Mask off correct bits in M2REG_AUTO_LINK_STATUS for interrupt handling. 
--- linux-core/xgi_cmdlist.h | 29 --------------------------- linux-core/xgi_drv.c | 19 ++++++++++++------ linux-core/xgi_regs.h | 51 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 35 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 07a2eb98..dc3fbe5a 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -27,35 +27,6 @@ #ifndef _XGI_CMDLIST_H_ #define _XGI_CMDLIST_H_ -#define ONE_BIT_MASK 0x1 -#define TWENTY_BIT_MASK 0xfffff -#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) -#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK -#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) -#define BASE_3D_ENG 0x2800 -#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10 -#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) -#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) -#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) -#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) -#define BEGIN_BEGIN_IDENTIFICATION_MASK (TWENTY_BIT_MASK<<0) -#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14 - -typedef enum { - FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK, - FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK, - FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK -} FLUSH_CODE; - -typedef enum { - AGPCMDLIST_SCRATCH_SIZE = 0x100, - AGPCMDLIST_BEGIN_SIZE = 0x004, - AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004, - AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c, - AGPCMDLIST_FLUSH_CMD_LEN = 0x004, - AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE -} CMD_SIZE; - struct xgi_cmdring_info { /** * Kernel space pointer to the base of the command ring. 
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index d0878184..f20df585 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -334,12 +334,19 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; struct xgi_info *info = dev->dev_private; - const u32 irq_bits = DRM_READ32(info->mmio_map, 0x2810); - - - if ((irq_bits & 0x00000000) != 0) { - DRM_WRITE32(info->mmio_map, 0x2810, - 0x04000000 | irq_bits); + const u32 irq_bits = DRM_READ32(info->mmio_map, + (0x2800 + + M2REG_AUTO_LINK_STATUS_ADDRESS)) + & (M2REG_ACTIVE_TIMER_INTERRUPT_MASK + | M2REG_ACTIVE_INTERRUPT_0_MASK + | M2REG_ACTIVE_INTERRUPT_2_MASK + | M2REG_ACTIVE_INTERRUPT_3_MASK); + + + if (irq_bits != 0) { + DRM_WRITE32(info->mmio_map, + 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS, + M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits); return IRQ_HANDLED; } else { return IRQ_NONE; diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index b3a47f8e..57e93405 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -30,6 +30,57 @@ #include "drmP.h" #include "drm.h" +#define BASE_3D_ENG 0x2800 + +#define MAKE_MASK(bits) ((1U << (bits)) - 1) + +#define ONE_BIT_MASK MAKE_MASK(1) +#define TWENTY_BIT_MASK MAKE_MASK(20) +#define TWENTYONE_BIT_MASK MAKE_MASK(21) +#define TWENTYTWO_BIT_MASK MAKE_MASK(22) + +#define M2REG_FLUSH_ENGINE_ADDRESS 0x000 +#define M2REG_FLUSH_ENGINE_COMMAND 0x00 +#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) +#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) +#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK + +/* Write register */ +#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010 +#define M2REG_AUTO_LINK_SETTING_COMMAND 0x04 +#define M2REG_CLEAR_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11) +#define M2REG_CLEAR_INTERRUPT_3_MASK (ONE_BIT_MASK<<10) +#define M2REG_CLEAR_INTERRUPT_2_MASK (ONE_BIT_MASK<<9) +#define M2REG_CLEAR_INTERRUPT_0_MASK (ONE_BIT_MASK<<8) +#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) +#define 
M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) +#define M2REG_INVALID_LIST_AUTO_INTERRUPT_MASK (ONE_BIT_MASK<<0) + +/* Read register */ +#define M2REG_AUTO_LINK_STATUS_ADDRESS 0x010 +#define M2REG_AUTO_LINK_STATUS_COMMAND 0x04 +#define M2REG_ACTIVE_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11) +#define M2REG_ACTIVE_INTERRUPT_3_MASK (ONE_BIT_MASK<<10) +#define M2REG_ACTIVE_INTERRUPT_2_MASK (ONE_BIT_MASK<<9) +#define M2REG_ACTIVE_INTERRUPT_0_MASK (ONE_BIT_MASK<<8) +#define M2REG_INVALID_LIST_AUTO_INTERRUPTED_MODE_MASK (ONE_BIT_MASK<<0) + +#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x014 +#define M2REG_PCI_TRIGGER_REGISTER_COMMAND 0x05 + + +/** + * Begin instruction, double-word 0 + */ +#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) +#define BEGIN_BEGIN_IDENTIFICATION_MASK TWENTY_BIT_MASK + +/** + * Begin instruction, double-word 1 + */ +#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) +#define BEGIN_COMMAND_LIST_LENGTH_MASK TWENTYTWO_BIT_MASK + /* Hardware access functions */ static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data) -- cgit v1.2.3 From 06e09842dfbdaa9502d3b3e6b657de4e3630644c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 18:28:16 -0700 Subject: Use DRM_MEMORYBARRIER() macro instead of mb(). 
--- linux-core/xgi_pcie.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 932615a4..d15ea32a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -96,11 +96,7 @@ static int xgi_pcie_lut_init(struct xgi_info * info) lut[i] = info->dev->sg->busaddr[i]; } -#if defined(__i386__) || defined(__x86_64__) - asm volatile ("wbinvd":::"memory"); -#else - mb(); -#endif + DRM_MEMORYBARRIER(); /* Set GART in SFB */ temp = DRM_READ8(info->mmio_map, 0xB00C); -- cgit v1.2.3 From 20a0e5e4298761ae6005399e45d66b93109d2121 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 9 Aug 2007 18:57:15 -0700 Subject: After calling drm_sman_cleanup, mark both heaps as uninitialized. Since the heaps weren't marked as uninitialized, SG memory was never re-allocated. This prevented the X-server from being able to restart without re-loading the kernel module. --- linux-core/xgi_drv.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index f20df585..fa418c0d 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -301,6 +301,9 @@ void xgi_driver_lastclose(struct drm_device * dev) if (info->fb_heap_initialized || info->pcie_heap_initialized) { drm_sman_cleanup(&info->sman); + + info->fb_heap_initialized = FALSE; + info->pcie_heap_initialized = FALSE; } } } -- cgit v1.2.3 From 15f841bd529b50901272ca35a4c57de42a51901a Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Mon, 13 Aug 2007 16:21:20 -0700 Subject: Strobe magic 0xB03F register to flush PCI-e GART table. The original XGI kernel driver strobed 0xB03F each time a page was allocated to back a GART page. When the driver was converted to use the DRM SG interface, this code was lost. Returning it fixes a long standing issue where the X-server would work fine the first time, but acceleration commands would be ignored on the second X-server invocation. 
--- linux-core/xgi_pcie.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index d15ea32a..a0f52740 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -103,7 +103,11 @@ static int xgi_pcie_lut_init(struct xgi_info * info) DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); /* Set GART base address to HW */ - dwWriteReg(info->mmio_map, 0xB034, info->lut_handle->busaddr); + DRM_WRITE32(info->mmio_map, 0xB034, info->lut_handle->busaddr); + + /* Flush GART table. */ + DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); + DRM_WRITE8(info->mmio_map, 0xB03F, 0x00); return 0; } -- cgit v1.2.3 From 3ee211f4f7435792752c1dbcd3a60e2e7abfba09 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Mon, 13 Aug 2007 16:29:24 -0700 Subject: Bug #11895: Only add the AGP base to map offset if the caller didn't. The i830 and newer intel 2D code adds the AGP base to map offsets already, because it wasn't doing the AGP enable which used to set dev->agp->base. Credit goes to Zhenyu for finding the issue. --- linux-core/drm_bufs.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index f9987ca6..f0b28fa1 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -223,11 +223,17 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset, #ifdef __alpha__ map->offset += dev->hose->mem_space->start; #endif - /* Note: dev->agp->base may actually be 0 when the DRM - * is not in control of AGP space. But if user space is - * it should already have added the AGP base itself. + /* In some cases (i810 driver), user space may have already + * added the AGP base itself, because dev->agp->base previously + * only got set during AGP enable. So, only add the base + * address if the map's offset isn't already within the + * aperture. 
*/ - map->offset += dev->agp->base; + if (map->offset < dev->agp->base || + map->offset > dev->agp->base + + dev->agp->agp_info.aper_size * 1024 * 1024) { + map->offset += dev->agp->base; + } map->mtrr = dev->agp->agp_mtrr; /* for getmap */ /* This assumes the DRM is in total control of AGP space. -- cgit v1.2.3 From 891714d8d732480af97fbc45562145a560b7999b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:18:44 -0700 Subject: Clean up xgi_(en|dis)able_(mmio|ge) and move to xgi_misc.c. --- linux-core/xgi_drv.h | 5 ++ linux-core/xgi_misc.c | 126 ++++++++++++++++++++++++++++++++++++++++++ linux-core/xgi_regs.h | 150 ++++++++++++++------------------------------------ 3 files changed, 171 insertions(+), 110 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 6afc4c6c..9f746de2 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -87,6 +87,11 @@ extern void xgi_pcie_lut_cleanup(struct xgi_info * info); extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); +extern void xgi_enable_mmio(struct xgi_info * info); +extern void xgi_disable_mmio(struct xgi_info * info); +extern void xgi_enable_ge(struct xgi_info * info); +extern void xgi_disable_ge(struct xgi_info * info); + extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); extern int xgi_free_ioctl(struct drm_device * dev, void * data, diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 84d1d4f2..be75e970 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -374,3 +374,129 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) } } } + + +void xgi_enable_mmio(struct xgi_info * info) +{ + u8 protect = 0; + u8 temp; + + /* Unprotect registers */ + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); + + DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A); + temp = DRM_READ8(info->mmio_map, 0x3D5); + 
DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20); + + /* Enable MMIO */ + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01); + + /* Protect registers */ + OUT3C5B(info->mmio_map, 0x11, protect); +} + + +void xgi_disable_mmio(struct xgi_info * info) +{ + u8 protect = 0; + u8 temp; + + /* Unprotect registers */ + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); + + /* Disable MMIO access */ + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE); + + /* Protect registers */ + OUT3C5B(info->mmio_map, 0x11, protect); +} + + +void xgi_enable_ge(struct xgi_info * info) +{ + u8 bOld3cf2a; + int wait = 0; + + OUT3C5B(info->mmio_map, 0x11, 0x92); + + /* Save and close dynamic gating + */ + bOld3cf2a = IN3CFB(info->mmio_map, XGI_MISC_CTRL); + OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a & ~EN_GEPWM); + + /* Enable 2D and 3D GE + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Reset both 3D and 2D engine + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, + (GE_ENABLE | GE_RESET | GE_ENABLE_3D)); + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Enable 2D engine only + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, GE_ENABLE); + + /* Enable 2D+3D engine + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + + /* Restore dynamic gating + */ + OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a); +} + + +void xgi_disable_ge(struct xgi_info * info) +{ + int wait = 0; + + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + + wait = 10; + while (wait--) { 
+ DRM_READ8(info->mmio_map, 0x36); + } + + /* Reset both 3D and 2D engine + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, + (GE_ENABLE | GE_RESET | GE_ENABLE_3D)); + + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Disable 2D engine and 3D engine. + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, 0); +} diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 57e93405..7f643f92 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -30,8 +30,6 @@ #include "drmP.h" #include "drm.h" -#define BASE_3D_ENG 0x2800 - #define MAKE_MASK(bits) ((1U << (bits)) - 1) #define ONE_BIT_MASK MAKE_MASK(1) @@ -39,6 +37,46 @@ #define TWENTYONE_BIT_MASK MAKE_MASK(21) #define TWENTYTWO_BIT_MASK MAKE_MASK(22) + +/* Port 0x3d4/0x3d5, index 0x2a */ +#define XGI_INTERFACE_SEL 0x2a +#define DUAL_64BIT (1U<<7) +#define INTERNAL_32BIT (1U<<6) +#define EN_SEP_WR (1U<<5) +#define POWER_DOWN_SEL (1U<<4) +/*#define RESERVED_3 (1U<<3) */ +#define SUBS_MCLK_PCICLK (1U<<2) +#define MEM_SIZE_MASK (3<<0) +#define MEM_SIZE_32MB (0<<0) +#define MEM_SIZE_64MB (1<<0) +#define MEM_SIZE_128MB (2<<0) +#define MEM_SIZE_256MB (3<<0) + +/* Port 0x3d4/0x3d5, index 0x36 */ +#define XGI_GE_CNTL 0x36 +#define GE_ENABLE (1U<<7) +/*#define RESERVED_6 (1U<<6) */ +/*#define RESERVED_5 (1U<<5) */ +#define GE_RESET (1U<<4) +/*#define RESERVED_3 (1U<<3) */ +#define GE_ENABLE_3D (1U<<2) +/*#define RESERVED_1 (1U<<1) */ +/*#define RESERVED_0 (1U<<0) */ + +/* Port 0x3ce/0x3cf, index 0x2a */ +#define XGI_MISC_CTRL 0x2a +#define MOTION_VID_SUSPEND (1U<<7) +#define DVI_CRTC_TIMING_SEL (1U<<6) +#define LCD_SEL_CTL_NEW (1U<<5) +#define LCD_SEL_EXT_DELYCTRL (1U<<4) +#define REG_LCDDPARST (1U<<3) +#define LCD2DPAOFF (1U<<2) +/*#define RESERVED_1 (1U<<1) */ +#define EN_GEPWM (1U<<0) /* Enable GE power management */ + + +#define BASE_3D_ENG 0x2800 + #define 
M2REG_FLUSH_ENGINE_ADDRESS 0x000 #define M2REG_FLUSH_ENGINE_COMMAND 0x00 #define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) @@ -133,114 +171,6 @@ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) } -static inline void xgi_enable_mmio(struct xgi_info * info) -{ - u8 protect = 0; - u8 temp; - - /* Unprotect registers */ - DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); - protect = DRM_READ8(info->mmio_map, 0x3C5); - DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); - - DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A); - temp = DRM_READ8(info->mmio_map, 0x3D5); - DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20); - - /* Enable MMIO */ - DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); - temp = DRM_READ8(info->mmio_map, 0x3D5); - DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01); - - /* Protect registers */ - OUT3C5B(info->mmio_map, 0x11, protect); -} - -static inline void xgi_disable_mmio(struct xgi_info * info) -{ - u8 protect = 0; - u8 temp; - - /* Unprotect registers */ - DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); - protect = DRM_READ8(info->mmio_map, 0x3C5); - DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); - - /* Disable MMIO access */ - DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); - temp = DRM_READ8(info->mmio_map, 0x3D5); - DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE); - - /* Protect registers */ - OUT3C5B(info->mmio_map, 0x11, protect); -} - -static inline void xgi_enable_ge(struct xgi_info * info) -{ - unsigned char bOld3cf2a = 0; - int wait = 0; - - // Enable GE - OUT3C5B(info->mmio_map, 0x11, 0x92); - - // Save and close dynamic gating - bOld3cf2a = IN3CFB(info->mmio_map, 0x2a); - OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a & 0xfe); - - // Reset both 3D and 2D engine - OUT3X5B(info->mmio_map, 0x36, 0x84); - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - OUT3X5B(info->mmio_map, 0x36, 0x94); - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - OUT3X5B(info->mmio_map, 0x36, 0x84); - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 
0x36); - } - // Enable 2D engine only - OUT3X5B(info->mmio_map, 0x36, 0x80); - - // Enable 2D+3D engine - OUT3X5B(info->mmio_map, 0x36, 0x84); - - // Restore dynamic gating - OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a); -} - -static inline void xgi_disable_ge(struct xgi_info * info) -{ - int wait = 0; - - // Reset both 3D and 2D engine - OUT3X5B(info->mmio_map, 0x36, 0x84); - - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - OUT3X5B(info->mmio_map, 0x36, 0x94); - - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - OUT3X5B(info->mmio_map, 0x36, 0x84); - - wait = 10; - while (wait--) { - DRM_READ8(info->mmio_map, 0x36); - } - - // Disable 2D engine only - OUT3X5B(info->mmio_map, 0x36, 0); -} - static inline void xgi_enable_dvi_interrupt(struct xgi_info * info) { OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 -- cgit v1.2.3 From be76f0eea9b455fde77e15ff35f4f00c70661e51 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:19:48 -0700 Subject: Remove unused interrupt related functions. 
--- linux-core/xgi_regs.h | 24 ------------------------ 1 file changed, 24 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 7f643f92..098d824d 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -170,28 +170,4 @@ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) DRM_WRITE32(map, addr, data); } - -static inline void xgi_enable_dvi_interrupt(struct xgi_info * info) -{ - OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x01); //Set 3cf.39 bit 0 to 0 - OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x01); //Set 3cf.39 bit 0 to 1 - OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x02); -} -static inline void xgi_disable_dvi_interrupt(struct xgi_info * info) -{ - OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x02); -} - -static inline void xgi_enable_crt1_interrupt(struct xgi_info * info) -{ - OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x04); - OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x04); - OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x08); -} - -static inline void xgi_disable_crt1_interrupt(struct xgi_info * info) -{ - OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x08); -} - #endif -- cgit v1.2.3 From d3c8e98dd9ccc366513c117d032fbf80be4eb06a Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:20:37 -0700 Subject: Move dwWriteReg to xgi_cmdlist.c, the only file where it is used. 
--- linux-core/xgi_cmdlist.c | 14 ++++++++++++++ linux-core/xgi_regs.h | 13 ------------- 2 files changed, 14 insertions(+), 13 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index a1ec5720..a728c0ef 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -34,6 +34,20 @@ static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(struct xgi_info * info); + +/** + * Graphic engine register (2d/3d) acessing interface + */ +static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) +{ +#ifdef XGI_MMIO_DEBUG + DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", + map->handle, addr, data); +#endif + DRM_WRITE32(map, addr, data); +} + + int xgi_cmdlist_initialize(struct xgi_info * info, size_t size, struct drm_file * filp) { diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 098d824d..2f9fbe64 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -157,17 +157,4 @@ static inline u8 IN3CFB(struct drm_map * map, u8 index) return DRM_READ8(map, 0x3CF); } - -/* - * Graphic engine register (2d/3d) acessing interface - */ -static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) -{ -#ifdef XGI_MMIO_DEBUG - DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", - map->handle, addr, data); -#endif - DRM_WRITE32(map, addr, data); -} - #endif -- cgit v1.2.3 From 7b12174aacd09a991be3e74a3db47534961a6887 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:24:02 -0700 Subject: Clean up remaining C++ style comments. 
--- linux-core/xgi_misc.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index be75e970..8232de9a 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -202,15 +202,18 @@ bool xgi_crt_irq_handler(struct xgi_info * info) bool ret = FALSE; u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); - if (IN3CFB(info->mmio_map, 0x37) & 0x01) // CRT1 interrupt just happened - { + /* CRT1 interrupt just happened + */ + if (IN3CFB(info->mmio_map, 0x37) & 0x01) { u8 op3cf_3d; u8 op3cf_37; - // What happened? + /* What happened? + */ op3cf_37 = IN3CFB(info->mmio_map, 0x37); - // Clear CRT interrupt + /* Clear CRT interrupt + */ op3cf_3d = IN3CFB(info->mmio_map, 0x3d); OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04)); OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04)); @@ -226,25 +229,30 @@ bool xgi_dvi_irq_handler(struct xgi_info * info) bool ret = FALSE; const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); - if (IN3CFB(info->mmio_map, 0x38) & 0x20) { // DVI interrupt just happened + /* DVI interrupt just happened + */ + if (IN3CFB(info->mmio_map, 0x38) & 0x20) { const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4); u8 op3cf_39; u8 op3cf_37; u8 op3x5_5a; - // What happened? + /* What happened? 
+ */ op3cf_37 = IN3CFB(info->mmio_map, 0x37); - //Notify BIOS that DVI plug/unplug happened + /* Notify BIOS that DVI plug/unplug happened + */ op3x5_5a = IN3X5B(info->mmio_map, 0x5a); OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7); DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4); - // Clear DVI interrupt + /* Clear DVI interrupt + */ op3cf_39 = IN3CFB(info->mmio_map, 0x39); - OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0 - OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01)); //Set 3cf.39 bit 0 to 1 + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01)); + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01)); ret = TRUE; } -- cgit v1.2.3 From f563a50d145848ed296b63c63422caff80232ddf Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 14 Aug 2007 13:44:51 -0700 Subject: Eliminate unused / useless ioctls. --- linux-core/xgi_drv.c | 6 ------ linux-core/xgi_drv.h | 14 +++----------- linux-core/xgi_misc.c | 33 --------------------------------- linux-core/xgi_pcie.c | 32 -------------------------------- 4 files changed, 3 insertions(+), 82 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index fa418c0d..0e77d4cd 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -41,15 +41,9 @@ static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); static struct drm_ioctl_desc xgi_ioctls[] = { DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_XGI_GE_RESET, xgi_ge_reset_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_DUMP_REGISTER, xgi_dump_register_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_DEBUG_INFO, xgi_restore_registers_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH), - DRM_IOCTL_DEF(DRM_XGI_TEST_RWINKERNEL, xgi_test_rwinkernel_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, 
xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER), }; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 9f746de2..194313cd 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -35,11 +35,11 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070809" +#define DRIVER_DATE "20070814" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 11 -#define DRIVER_PATCHLEVEL 2 +#define DRIVER_MINOR 12 +#define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -96,16 +96,8 @@ extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); extern int xgi_free_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); -extern int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); -extern int xgi_dump_register_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); -extern int xgi_restore_registers_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); extern int xgi_submit_cmdlist(struct drm_device * dev, void * data, struct drm_file * filp); -extern int xgi_test_rwinkernel_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp); extern int xgi_state_change_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 8232de9a..50a721c0 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -29,18 +29,6 @@ #include -int xgi_ge_reset_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - - xgi_disable_ge(info); - xgi_enable_ge(info); - - return 0; -} - - /* * irq functions */ @@ -324,27 +312,6 @@ void xgi_dump_register(struct xgi_info * info) } -int xgi_dump_register_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - - xgi_dump_register(info); - return 0; -} - - -int 
xgi_restore_registers_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - - OUT3X5B(info->mmio_map, 0x13, 0); - OUT3X5B(info->mmio_map, 0x8b, 2); - return 0; -} - - #define WHOLD_GE_STATUS 0x2800 /* Test everything except the "whole GE busy" bit, the "master engine busy" diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index a0f52740..b4d204c1 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -157,35 +157,3 @@ void *xgi_find_pcie_virt(struct xgi_info * info, u32 address) return ((u8 *) info->dev->sg->virtual) + offset; } - -/* - address -- GE hw address -*/ -int xgi_test_rwinkernel_ioctl(struct drm_device * dev, void * data, - struct drm_file * filp) -{ - struct xgi_info *info = dev->dev_private; - u32 address = *(u32 *) data; - u32 *virtaddr = 0; - - - DRM_INFO("input GE HW addr is 0x%x\n", address); - - if (address == 0) { - return -EFAULT; - } - - virtaddr = (u32 *)xgi_find_pcie_virt(info, address); - - DRM_INFO("convert to CPU virt addr 0x%p\n", virtaddr); - - if (virtaddr != NULL) { - DRM_INFO("original [virtaddr] = 0x%x\n", *virtaddr); - *virtaddr = 0x00f00fff; - DRM_INFO("modified [virtaddr] = 0x%x\n", *virtaddr); - } else { - return -EFAULT; - } - - return 0; -} -- cgit v1.2.3 From c3faa589b09616acdfd827be1719f6c2706c49ab Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 15 Aug 2007 13:36:54 +1000 Subject: nouveau: Allow GART notifiers when using sgdma code. 
--- linux-core/nouveau_sgdma.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'linux-core') diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index df970d11..97d5330b 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -316,3 +316,20 @@ nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev) { } +int +nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + int pte; + + pte = (offset >> NV_CTXDMA_PAGE_SHIFT); + if (dev_priv->card_type < NV_50) { + *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; + return 0; + } + + DRM_ERROR("Unimplemented on NV50\n"); + return -EINVAL; +} + -- cgit v1.2.3 From b668d6d9050106bebfb704e4ed32d2924bb26371 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 15 Aug 2007 14:29:31 -0700 Subject: Fix dev->agp->base initialization on BSD, and fix addmap range check on Linux. With the previous linux commit, an AGP aperture at the end of the address space would have wrapped to 0 and the test would have failed. --- linux-core/drm_bufs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index f0b28fa1..60eca60c 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -231,7 +231,7 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset, */ if (map->offset < dev->agp->base || map->offset > dev->agp->base + - dev->agp->agp_info.aper_size * 1024 * 1024) { + dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { map->offset += dev->agp->base; } map->mtrr = dev->agp->agp_mtrr; /* for getmap */ -- cgit v1.2.3 From d8a800b63de09f41d482d2b3367e4da67ed0f92b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 15 Aug 2007 21:05:26 -0700 Subject: Implement fence support. 
--- linux-core/Makefile.kernel | 3 +- linux-core/xgi_cmdlist.c | 76 ++++++++++++++++++++++++++++++++++++---------- linux-core/xgi_cmdlist.h | 2 ++ linux-core/xgi_drv.c | 18 +++++++++++ linux-core/xgi_drv.h | 13 +++++++- linux-core/xgi_regs.h | 9 ++++++ 6 files changed, 103 insertions(+), 18 deletions(-) (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index c898206d..c651b0b2 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -38,7 +38,8 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \ via_video.o via_dmablit.o via_fence.o via_buffer.o mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o nv-objs := nv_drv.o -xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o +xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \ + xgi_fence.o ifeq ($(CONFIG_COMPAT),y) drm-objs += drm_ioc32.o diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index a728c0ef..5409892a 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -28,8 +28,10 @@ #include "xgi_regs.h" #include "xgi_misc.h" #include "xgi_cmdlist.h" +#include -static void xgi_emit_flush(struct xgi_info * info, bool link); +static void xgi_emit_flush(struct xgi_info * info, bool stop); +static void xgi_emit_nop(struct xgi_info * info); static unsigned int get_batch_command(enum xgi_batch_type type); static void triggerHWCommandList(struct xgi_info * info); static void xgi_cmdlist_reset(struct xgi_info * info); @@ -101,7 +103,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, begin[0] = (cmd << 24) | BEGIN_VALID_MASK - | (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->id); + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence); begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size; begin[2] = pCmdInfo->hw_addr >> 4; begin[3] = 0; @@ -134,19 +136,20 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, 
DRM_DEBUG("info->cmdring.last_ptr != NULL\n"); if (pCmdInfo->type == BTYPE_3D) { - xgi_emit_flush(info, TRUE); + xgi_emit_flush(info, FALSE); } info->cmdring.last_ptr[1] = begin[1]; info->cmdring.last_ptr[2] = begin[2]; info->cmdring.last_ptr[3] = begin[3]; - wmb(); + DRM_WRITEMEMORYBARRIER(); info->cmdring.last_ptr[0] = begin[0]; triggerHWCommandList(info); } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); + drm_fence_flush_old(info->dev, 0, info->next_sequence); return 0; } @@ -213,9 +216,11 @@ void xgi_cmdlist_cleanup(struct xgi_info * info) */ if (info->cmdring.last_ptr != NULL) { xgi_emit_flush(info, FALSE); - xgi_waitfor_pci_idle(info); + xgi_emit_nop(info); } + xgi_waitfor_pci_idle(info); + (void) memset(&info->cmdring, 0, sizeof(info->cmdring)); } } @@ -233,23 +238,25 @@ static void triggerHWCommandList(struct xgi_info * info) /** * Emit a flush to the CRTL command stream. * @info XGI info structure - * @link Emit (or don't emit) link information at start of flush command. * * This function assumes info->cmdring.ptr is non-NULL. */ -static void xgi_emit_flush(struct xgi_info * info, bool link) +void xgi_emit_flush(struct xgi_info * info, bool stop) { - static const u32 flush_command[8] = { - (0x10 << 24), + const u32 flush_command[8] = { + ((0x10 << 24) + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)), BEGIN_LINK_ENABLE_MASK | (0x00004), 0x00000000, 0x00000000, - /* Flush everything with the default 32 clock delay. + /* Flush the 2D engine with the default 32 clock delay. */ - 0x003fffff, 0x003fffff, 0x003fffff, 0x003fffff + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, }; - const unsigned int base = (link) ? 
0 : 4; - const unsigned int flush_size = (8 - base) * sizeof(u32); + const unsigned int flush_size = sizeof(flush_command); u32 *batch_addr; u32 hw_addr; @@ -263,17 +270,54 @@ static void xgi_emit_flush(struct xgi_info * info, bool link) batch_addr = info->cmdring.ptr + (info->cmdring.ring_offset / 4); - (void) memcpy(batch_addr, & flush_command[base], flush_size); + (void) memcpy(batch_addr, flush_command, flush_size); + + if (stop) { + *batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK; + } info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4); info->cmdring.last_ptr[2] = hw_addr >> 4; info->cmdring.last_ptr[3] = 0; - wmb(); + DRM_WRITEMEMORYBARRIER(); info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) | (BEGIN_VALID_MASK); triggerHWCommandList(info); info->cmdring.ring_offset += flush_size; - info->cmdring.last_ptr = (link) ? batch_addr : NULL; + info->cmdring.last_ptr = batch_addr; +} + + +/** + * Emit an empty command to the CRTL command stream. + * @info XGI info structure + * + * This function assumes info->cmdring.ptr is non-NULL. In addition, since + * this function emits a command that does not have linkage information, + * it sets info->cmdring.ptr to NULL. 
+ */ +void xgi_emit_nop(struct xgi_info * info) +{ + info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence); + info->cmdring.last_ptr[2] = 0; + info->cmdring.last_ptr[3] = 0; + DRM_WRITEMEMORYBARRIER(); + info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK); + + triggerHWCommandList(info); + + info->cmdring.last_ptr = NULL; +} + + +void xgi_emit_irq(struct xgi_info * info) +{ + if (info->cmdring.last_ptr == NULL) + return; + + xgi_emit_flush(info, TRUE); } diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index dc3fbe5a..f6f1c1ef 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -61,4 +61,6 @@ extern int xgi_state_change(struct xgi_info * info, unsigned int to, extern void xgi_cmdlist_cleanup(struct xgi_info * info); +extern void xgi_emit_irq(struct xgi_info * info); + #endif /* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 0e77d4cd..241cd39f 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -37,6 +37,17 @@ static struct pci_device_id pciidlist[] = { xgi_PCI_IDS }; +static struct drm_fence_driver xgi_fence_driver = { + .num_classes = 1, + .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK, + .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1, + .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK, + .lazy_capable = 1, + .emit = xgi_fence_emit_sequence, + .poke_flush = xgi_poke_flush, + .has_irq = xgi_fence_has_irq +}; + static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); static struct drm_ioctl_desc xgi_ioctls[] = { @@ -95,6 +106,8 @@ static struct drm_driver driver = { .remove = __devexit_p(drm_cleanup_pci), }, + .fence_driver = &xgi_fence_driver, + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -189,6 +202,10 @@ int xgi_bootstrap(struct drm_device * dev, void * data, int err; + spin_lock_init(&info->fence_lock); + info->next_sequence = 0; + 
info->complete_sequence = 0; + if (info->mmio_map == NULL) { err = drm_addmap(dev, info->mmio.base, info->mmio.size, _DRM_REGISTERS, _DRM_KERNEL, @@ -344,6 +361,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) DRM_WRITE32(info->mmio_map, 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS, M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits); + xgi_fence_handler(dev); return IRQ_HANDLED; } else { return IRQ_NONE; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 194313cd..c815f63e 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -38,7 +38,7 @@ #define DRIVER_DATE "20070814" #define DRIVER_MAJOR 0 -#define DRIVER_MINOR 12 +#define DRIVER_MINOR 13 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" @@ -72,6 +72,10 @@ struct xgi_info { bool pcie_heap_initialized; struct xgi_cmdring_info cmdring; + + spinlock_t fence_lock; + unsigned complete_sequence; + unsigned next_sequence; }; extern int xgi_fb_heap_init(struct xgi_info * info); @@ -92,6 +96,13 @@ extern void xgi_disable_mmio(struct xgi_info * info); extern void xgi_enable_ge(struct xgi_info * info); extern void xgi_disable_ge(struct xgi_info * info); +extern void xgi_poke_flush(struct drm_device * dev, uint32_t class); +extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, + uint32_t flags, uint32_t * sequence, uint32_t * native_type); +extern void xgi_fence_handler(struct drm_device * dev); +extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, + uint32_t flags); + extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); extern int xgi_free_ioctl(struct drm_device * dev, void * data, diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 2f9fbe64..5c0100a0 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -83,6 +83,14 @@ #define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) #define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK +#define M2REG_RESET_ADDRESS 0x004 +#define M2REG_RESET_COMMAND 0x01 +#define 
M2REG_RESET_STATUS2_MASK (ONE_BIT_MASK<<10) +#define M2REG_RESET_STATUS1_MASK (ONE_BIT_MASK<<9) +#define M2REG_RESET_STATUS0_MASK (ONE_BIT_MASK<<8) +#define M2REG_RESET_3DENG_MASK (ONE_BIT_MASK<<4) +#define M2REG_RESET_2DENG_MASK (ONE_BIT_MASK<<2) + /* Write register */ #define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010 #define M2REG_AUTO_LINK_SETTING_COMMAND 0x04 @@ -110,6 +118,7 @@ /** * Begin instruction, double-word 0 */ +#define BEGIN_STOP_STORE_CURRENT_POINTER_MASK (ONE_BIT_MASK<<22) #define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) #define BEGIN_BEGIN_IDENTIFICATION_MASK TWENTY_BIT_MASK -- cgit v1.2.3 From 0d3c741df19c35307723422c1f2f28a23995823d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 16 Aug 2007 13:43:04 -0700 Subject: Forgot to add this file on the last commit. --- linux-core/xgi_fence.c | 125 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 linux-core/xgi_fence.c (limited to 'linux-core') diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c new file mode 100644 index 00000000..e5b545de --- /dev/null +++ b/linux-core/xgi_fence.c @@ -0,0 +1,125 @@ +/* + * (C) Copyright IBM Corporation 2007 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Ian Romanick + */ + +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) +{ + struct xgi_info * info = dev->dev_private; + struct drm_fence_class_manager * fc = &dev->fm.class[class]; + uint32_t pending_flush_types = 0; + uint32_t signaled_flush_types = 0; + + + if ((info == NULL) || (class != 0)) + return 0; + + spin_lock(&info->fence_lock); + + pending_flush_types = fc->pending_flush | + ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0); + + if (pending_flush_types) { + if (pending_flush_types & DRM_FENCE_TYPE_EXE) { + const u32 begin_id = DRM_READ32(info->mmio_map, + 0x2820) + & BEGIN_BEGIN_IDENTIFICATION_MASK; + + if (begin_id != info->complete_sequence) { + info->complete_sequence = begin_id; + signaled_flush_types |= DRM_FENCE_TYPE_EXE; + } + } + + if (signaled_flush_types) { + drm_fence_handler(dev, 0, info->complete_sequence, + signaled_flush_types); + } + } + + spin_unlock(&info->fence_lock); + + return fc->pending_flush | + ((fc->pending_exe_flush) ? 
DRM_FENCE_TYPE_EXE : 0); +} + + +int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, + uint32_t flags, uint32_t * sequence, + uint32_t * native_type) +{ + struct xgi_info * info = dev->dev_private; + + if ((info == NULL) || (class != 0)) + return -EINVAL; + + + spin_lock(&info->fence_lock); + info->next_sequence++; + if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) { + info->next_sequence = 1; + } + spin_unlock(&info->fence_lock); + + + *sequence = (uint32_t) info->next_sequence; + *native_type = DRM_FENCE_TYPE_EXE; + + return 0; +} + + +void xgi_poke_flush(struct drm_device * dev, uint32_t class) +{ + struct drm_fence_manager * fm = &dev->fm; + unsigned long flags; + + + write_lock_irqsave(&fm->lock, flags); + xgi_do_flush(dev, class); + write_unlock_irqrestore(&fm->lock, flags); +} + + +void xgi_fence_handler(struct drm_device * dev) +{ + struct drm_fence_manager * fm = &dev->fm; + + + write_lock(&fm->lock); + xgi_do_flush(dev, 0); + write_unlock(&fm->lock); +} + + +int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) +{ + return ((class == 0) && (flags == DRM_FENCE_TYPE_EXE)) ? 1 : 0; +} -- cgit v1.2.3 From 3383e8bd6bcd2323c81252e617c8522593baf818 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 17 Aug 2007 10:53:18 -0700 Subject: Remove unnecessary include. 
--- linux-core/xgi_cmdlist.c | 1 - 1 file changed, 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 5409892a..261f4e13 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -28,7 +28,6 @@ #include "xgi_regs.h" #include "xgi_misc.h" #include "xgi_cmdlist.h" -#include static void xgi_emit_flush(struct xgi_info * info, bool stop); static void xgi_emit_nop(struct xgi_info * info); -- cgit v1.2.3 From 589707b765eee78cc278c10603e2c858bb819436 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 28 Aug 2007 15:17:11 +1000 Subject: drm: remove XFREE86_VERSION macros --- linux-core/i810_drm.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'linux-core') diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h index eff61b4d..d803aeca 100644 --- a/linux-core/i810_drm.h +++ b/linux-core/i810_drm.h @@ -102,13 +102,8 @@ typedef enum _drm_i810_init_func { /* This is the init structure after v1.2 */ typedef struct _drm_i810_init { drm_i810_init_func_t func; -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) - int ring_map_idx; - int buffer_map_idx; -#else unsigned int mmio_offset; unsigned int buffers_offset; -#endif int sarea_priv_offset; unsigned int ring_start; unsigned int ring_end; -- cgit v1.2.3 From 2bcd5b5e330843e1e1a5f0a19105ecd33e76b00b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 29 Aug 2007 00:04:18 -0700 Subject: Use DRM_SPINLOCK / DRM_UNSPINLOCK macros. 
--- linux-core/xgi_drv.c | 2 +- linux-core/xgi_drv.h | 2 +- linux-core/xgi_fence.c | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 241cd39f..6b576558 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -202,7 +202,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, int err; - spin_lock_init(&info->fence_lock); + DRM_SPINLOCK_INIT(&info->fence_lock); info->next_sequence = 0; info->complete_sequence = 0; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index c815f63e..d43a6b4e 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -73,7 +73,7 @@ struct xgi_info { struct xgi_cmdring_info cmdring; - spinlock_t fence_lock; + DRM_SPINTYPE fence_lock; unsigned complete_sequence; unsigned next_sequence; }; diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index e5b545de..42ed814d 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -41,7 +41,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) if ((info == NULL) || (class != 0)) return 0; - spin_lock(&info->fence_lock); + DRM_SPINLOCK(&info->fence_lock); pending_flush_types = fc->pending_flush | ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0); @@ -64,7 +64,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) } } - spin_unlock(&info->fence_lock); + DRM_SPINUNLOCK(&info->fence_lock); return fc->pending_flush | ((fc->pending_exe_flush) ? 
DRM_FENCE_TYPE_EXE : 0); @@ -81,12 +81,12 @@ int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, return -EINVAL; - spin_lock(&info->fence_lock); + DRM_SPINLOCK(&info->fence_lock); info->next_sequence++; if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) { info->next_sequence = 1; } - spin_unlock(&info->fence_lock); + DRM_SPINUNLOCK(&info->fence_lock); *sequence = (uint32_t) info->next_sequence; -- cgit v1.2.3 From c46ffd6b2943332a88589fb525305ffd09d35b8d Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 29 Aug 2007 00:23:30 -0700 Subject: Fix late night dumb-dumb mistake. --- linux-core/xgi_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 6b576558..4b90579e 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -202,7 +202,7 @@ int xgi_bootstrap(struct drm_device * dev, void * data, int err; - DRM_SPINLOCK_INIT(&info->fence_lock); + DRM_SPININIT(&info->fence_lock, "fence lock"); info->next_sequence = 0; info->complete_sequence = 0; -- cgit v1.2.3 From 9c5b9d458bc618fb9d7d8590c866655e92f9cb0b Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 29 Aug 2007 14:41:49 -0700 Subject: Use ati_pcigart for PCI-e GART table handling. 
--- linux-core/xgi_drv.c | 2 +- linux-core/xgi_drv.h | 3 +- linux-core/xgi_pcie.c | 89 ++++++++++++++++----------------------------------- 3 files changed, 30 insertions(+), 64 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 4b90579e..84547f62 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -306,7 +306,7 @@ void xgi_driver_lastclose(struct drm_device * dev) info->fb_map = NULL; if (info->pcie_heap_initialized) { - xgi_pcie_lut_cleanup(info); + drm_ati_pcigart_cleanup(dev, &info->gart_info); } if (info->fb_heap_initialized diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index d43a6b4e..f2768d1b 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -64,7 +64,7 @@ struct xgi_info { struct drm_map *fb_map; /* look up table parameters */ - struct drm_dma_handle *lut_handle; + struct ati_pcigart_info gart_info; unsigned int lutPageSize; struct drm_sman sman; @@ -87,7 +87,6 @@ extern int xgi_free(struct xgi_info * info, unsigned long index, struct drm_file * filp); extern int xgi_pcie_heap_init(struct xgi_info * info); -extern void xgi_pcie_lut_cleanup(struct xgi_info * info); extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index b4d204c1..a7d3ea24 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -28,15 +28,31 @@ #include "xgi_regs.h" #include "xgi_misc.h" -static int xgi_pcie_lut_init(struct xgi_info * info) +void xgi_gart_flush(struct drm_device *dev) +{ + struct xgi_info *const info = dev->dev_private; + u8 temp; + + DRM_MEMORYBARRIER(); + + /* Set GART in SFB */ + temp = DRM_READ8(info->mmio_map, 0xB00C); + DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); + + /* Set GART base address to HW */ + DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr); + + /* Flush GART table. 
*/ + DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); + DRM_WRITE8(info->mmio_map, 0xB03F, 0x00); +} + + +int xgi_pcie_heap_init(struct xgi_info * info) { u8 temp = 0; int err; - unsigned i; struct drm_scatter_gather request; - struct drm_sg_mem *sg; - u32 *lut; - /* Get current FB aperture size */ temp = IN3X5B(info->mmio_map, 0x27); @@ -70,73 +86,24 @@ static int xgi_pcie_lut_init(struct xgi_info * info) return err; } - sg = info->dev->sg; + info->gart_info.gart_table_location = DRM_ATI_GART_MAIN; + info->gart_info.gart_reg_if = DRM_ATI_GART_PCI; + info->gart_info.table_size = info->dev->sg->pages * sizeof(u32); - info->lut_handle = drm_pci_alloc(info->dev, - sizeof(u32) * sg->pages, - PAGE_SIZE, - DMA_31BIT_MASK); - if (info->lut_handle == NULL) { - DRM_ERROR("cannot allocate PCIE lut page!\n"); + if (!drm_ati_pcigart_init(info->dev, &info->gart_info)) { + DRM_ERROR("failed to init PCI GART!\n"); return -ENOMEM; } - lut = info->lut_handle->vaddr; - for (i = 0; i < sg->pages; i++) { - info->dev->sg->busaddr[i] = pci_map_page(info->dev->pdev, - sg->pagelist[i], - 0, - PAGE_SIZE, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(info->dev->sg->busaddr[i])) { - DRM_ERROR("cannot map GART backing store for DMA!\n"); - return info->dev->sg->busaddr[i]; - } - - lut[i] = info->dev->sg->busaddr[i]; - } - - DRM_MEMORYBARRIER(); - - /* Set GART in SFB */ - temp = DRM_READ8(info->mmio_map, 0xB00C); - DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); - - /* Set GART base address to HW */ - DRM_WRITE32(info->mmio_map, 0xB034, info->lut_handle->busaddr); - - /* Flush GART table. 
*/ - DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); - DRM_WRITE8(info->mmio_map, 0xB03F, 0x00); - - return 0; -} - -void xgi_pcie_lut_cleanup(struct xgi_info * info) -{ - if (info->lut_handle) { - drm_pci_free(info->dev, info->lut_handle); - info->lut_handle = NULL; - } -} - -int xgi_pcie_heap_init(struct xgi_info * info) -{ - int err; - - err = xgi_pcie_lut_init(info); - if (err) { - DRM_ERROR("xgi_pcie_lut_init failed\n"); - return err; - } + xgi_gart_flush(info->dev); mutex_lock(&info->dev->struct_mutex); err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL, 0, info->pcie.size); mutex_unlock(&info->dev->struct_mutex); if (err) { - xgi_pcie_lut_cleanup(info); + drm_ati_pcigart_cleanup(info->dev, &info->gart_info); } info->pcie_heap_initialized = (err == 0); -- cgit v1.2.3 From bb3da88601749cd647632eed86fb57dfd7cb81ee Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Fri, 31 Aug 2007 10:48:13 -0700 Subject: Acutally emit the IRQ (duh) when setting the fence post. --- linux-core/xgi_fence.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index 42ed814d..adedf300 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -87,7 +87,9 @@ int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, info->next_sequence = 1; } DRM_SPINUNLOCK(&info->fence_lock); - + + + xgi_emit_irq(info); *sequence = (uint32_t) info->next_sequence; *native_type = DRM_FENCE_TYPE_EXE; -- cgit v1.2.3 From c597bd57eee3ea05a3b8c851615c7351d0b32fce Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Thu, 6 Sep 2007 15:20:52 -0700 Subject: Bump version to 1.0.0. 
--- linux-core/xgi_drv.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index f2768d1b..88ade64d 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -35,10 +35,10 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070814" +#define DRIVER_DATE "20070906" -#define DRIVER_MAJOR 0 -#define DRIVER_MINOR 13 +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" -- cgit v1.2.3 From c453135789597648ef5aa641c4e59bb5b5e320de Mon Sep 17 00:00:00 2001 From: Brian Date: Wed, 12 Sep 2007 11:48:48 -0600 Subject: Added idr_replace() function which was apparently added in Linux 2.6.18 Someone should probably double-check my work here since this is the first time I've touched drm_compat.[ch] --- linux-core/drm_compat.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ linux-core/drm_compat.h | 5 +++++ 2 files changed, 52 insertions(+) (limited to 'linux-core') diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 9a6da7e9..e51aedb7 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -678,4 +678,51 @@ void idr_remove_all(struct idr *idp) idp->layers = 0; } EXPORT_SYMBOL(idr_remove_all); + +#endif /* DRM_IDR_COMPAT_FN */ + + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +/** + * idr_replace - replace pointer for given id + * @idp: idr handle + * @ptr: pointer you want associated with the id + * @id: lookup key + * + * Replace the pointer registered with an id and return the old value. + * A -ENOENT return indicates that @id was not found. + * A -EINVAL return indicates that @id was not within valid constraints. + * + * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). 
+ */ +void *idr_replace(struct idr *idp, void *ptr, int id) +{ + int n; + struct idr_layer *p, *old_p; + + n = idp->layers * IDR_BITS; + p = idp->top; + + id &= MAX_ID_MASK; + + if (id >= (1 << n)) + return ERR_PTR(-EINVAL); + + n -= IDR_BITS; + while ((n > 0) && p) { + p = p->ary[(id >> n) & IDR_MASK]; + n -= IDR_BITS; + } + + n = id & IDR_MASK; + if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) + return ERR_PTR(-ENOENT); + + old_p = p->ary[n]; + p->ary[n] = ptr; + + return (void *)old_p; +} +EXPORT_SYMBOL(idr_replace); #endif diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 0b00ba47..94db8533 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -316,4 +316,9 @@ int idr_for_each(struct idr *idp, void idr_remove_all(struct idr *idp); #endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +void *idr_replace(struct idr *idp, void *ptr, int id); +#endif + #endif -- cgit v1.2.3 From 41345b95a2cdc1e509171d31fc8aed8cecb43dbd Mon Sep 17 00:00:00 2001 From: Brian Date: Wed, 12 Sep 2007 12:05:15 -0600 Subject: Added bool typedef added in kernel 2.6.19 This allows the xgi code to compile with older kernels. --- linux-core/drm_compat.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'linux-core') diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 94db8533..870f8b73 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -321,4 +321,8 @@ void idr_remove_all(struct idr *idp); void *idr_replace(struct idr *idp, void *ptr, int id); #endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) +typedef _Bool bool; +#endif + #endif -- cgit v1.2.3 From e7d4a26913ba3a4949ac36280925062948ee21ce Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 18 Sep 2007 11:03:08 -0700 Subject: Fix ioc32 compat layer Previously any ioctls that weren't explicitly listed in the compat ioctl table would fail with ENOTTY. 
If the incoming ioctl number is outside the range of the table, assume that it Just Works, and pass it off to drm_ioctl. This make the fence related ioctls work on 64-bit PowerPC. --- linux-core/drm_ioc32.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c index 558376de..0188154e 100644 --- a/linux-core/drm_ioc32.c +++ b/linux-core/drm_ioc32.c @@ -1051,8 +1051,13 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) drm_ioctl_compat_t *fn; int ret; + + /* Assume that ioctls without an explicit compat routine will "just + * work". This may not always be a good assumption, but it's better + * than always failing. + */ if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls)) - return -ENOTTY; + return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); fn = drm_compat_ioctls[nr]; -- cgit v1.2.3 From a3881ad2fef99aaf0a863609a847020ea822798c Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Tue, 18 Sep 2007 11:03:49 -0700 Subject: Add ioc32 compat layer for XGI DRM. 
--- linux-core/Makefile.kernel | 3 +- linux-core/xgi_drv.c | 5 +- linux-core/xgi_drv.h | 7 ++- linux-core/xgi_fb.c | 7 +++ linux-core/xgi_ioc32.c | 140 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 158 insertions(+), 4 deletions(-) create mode 100644 linux-core/xgi_ioc32.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index ac77941e..b282bd05 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -48,6 +48,7 @@ mga-objs += mga_ioc32.o r128-objs += r128_ioc32.o i915-objs += i915_ioc32.o nouveau-objs += nouveau_ioc32.o +xgi-objs += xgi_ioc32.o endif obj-m += drm.o @@ -64,4 +65,4 @@ obj-$(CONFIG_DRM_VIA) += via.o obj-$(CONFIG_DRM_MACH64)+= mach64.o obj-$(CONFIG_DRM_NV) += nv.o obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o -obj-$(CONFIG_DRM_XGI) += xgi.o \ No newline at end of file +obj-$(CONFIG_DRM_XGI) += xgi.o diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 84547f62..bc6873a9 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -48,7 +48,7 @@ static struct drm_fence_driver xgi_fence_driver = { .has_irq = xgi_fence_has_irq }; -static int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); +int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); static struct drm_ioctl_desc xgi_ioctls[] = { DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -97,6 +97,9 @@ static struct drm_driver driver = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = xgi_compat_ioctl, +#endif }, .pci_driver = { diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 88ade64d..a68dc03b 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -35,10 +35,10 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070906" +#define DRIVER_DATE "20070918" #define DRIVER_MAJOR 1 -#define 
DRIVER_MINOR 0 +#define DRIVER_MINOR 1 #define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" @@ -78,6 +78,9 @@ struct xgi_info { unsigned next_sequence; }; +extern long xgi_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + extern int xgi_fb_heap_init(struct xgi_info * info); extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 40f39fbc..2e2d0094 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -65,6 +65,13 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, alloc->hw_addr = alloc->offset; alloc->index = block->user_hash.key; + if (block->user_hash.key != (unsigned long) alloc->index) { + DRM_ERROR("%s truncated handle %lx for pool %d " + "offset %x\n", + __func__, block->user_hash.key, + alloc->location, alloc->offset); + } + if (alloc->location == XGI_MEMLOC_NON_LOCAL) { alloc->hw_addr += info->pcie.base; } diff --git a/linux-core/xgi_ioc32.c b/linux-core/xgi_ioc32.c new file mode 100644 index 00000000..c54044fa --- /dev/null +++ b/linux-core/xgi_ioc32.c @@ -0,0 +1,140 @@ +/* + * (C) Copyright IBM Corporation 2007 + * Copyright (C) Paul Mackerras 2005. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Ian Romanick + */ + +#include + +#include "drmP.h" +#include "drm.h" + +#include "xgi_drm.h" + +/* This is copied from drm_ioc32.c. + */ +struct drm_map32 { + u32 offset; /**< Requested physical address (0 for SAREA)*/ + u32 size; /**< Requested physical size (bytes) */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ + u32 handle; /**< User-space: "Handle" to pass to mmap() */ + int mtrr; /**< MTRR slot used */ +}; + +struct drm32_xgi_bootstrap { + struct drm_map32 gart; +}; + + +extern int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); + +static int compat_xgi_bootstrap(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct drm32_xgi_bootstrap __user *const argp = (void __user *)arg; + struct drm32_xgi_bootstrap bs32; + struct xgi_bootstrap __user *bs; + int err; + void *handle; + + + if (copy_from_user(&bs32, argp, sizeof(bs32))) { + return -EFAULT; + } + + bs = compat_alloc_user_space(sizeof(*bs)); + if (!access_ok(VERIFY_WRITE, bs, sizeof(*bs))) { + return -EFAULT; + } + + if (__put_user(bs32.gart.offset, &bs->gart.offset) + || __put_user(bs32.gart.size, &bs->gart.size) + || __put_user(bs32.gart.type, &bs->gart.type) + || __put_user(bs32.gart.flags, &bs->gart.flags)) { + return -EFAULT; + } + + err = drm_ioctl(filp->f_dentry->d_inode, filp, XGI_IOCTL_BOOTSTRAP, + (unsigned long)bs); + if (err) { + return err; + } + + if (__get_user(bs32.gart.offset, &bs->gart.offset) + || 
__get_user(bs32.gart.mtrr, &bs->gart.mtrr) + || __get_user(handle, &bs->gart.handle)) { + return -EFAULT; + } + + bs32.gart.handle = (unsigned long)handle; + if (bs32.gart.handle != (unsigned long)handle && printk_ratelimit()) { + printk(KERN_ERR "%s truncated handle %p for type %d " + "offset %x\n", + __func__, handle, bs32.gart.type, bs32.gart.offset); + } + + if (copy_to_user(argp, &bs32, sizeof(bs32))) { + return -EFAULT; + } + + return 0; +} + + +drm_ioctl_compat_t *xgi_compat_ioctls[] = { + [DRM_XGI_BOOTSTRAP] = compat_xgi_bootstrap, +}; + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long xgi_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + const unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(xgi_compat_ioctls)) + fn = xgi_compat_ioctls[nr - DRM_COMMAND_BASE]; + + lock_kernel(); + ret = (fn != NULL) + ? 
(*fn)(filp, cmd, arg) + : drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} -- cgit v1.2.3 From bc5423f16838257a040a55b88df9588d268fda06 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 20 Sep 2007 14:01:29 +1000 Subject: drm_sysfs: update sysfs code from kernel --- linux-core/drmP.h | 9 ++- linux-core/drm_stub.c | 2 +- linux-core/drm_sysfs.c | 171 +++++++++++++++---------------------------------- 3 files changed, 58 insertions(+), 124 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index aa562225..f8ca3f4b 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -1130,7 +1130,7 @@ extern int drm_put_head(struct drm_head * head); extern unsigned int drm_debug; /* 1 to enable debug output */ extern unsigned int drm_cards_limit; extern struct drm_head **drm_heads; -extern struct drm_sysfs_class *drm_class; +extern struct class *drm_class; extern struct proc_dir_entry *drm_proc_root; extern drm_local_map_t *drm_getsarea(struct drm_device *dev); @@ -1163,10 +1163,9 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* sysfs support (drm_sysfs.c) */ struct drm_sysfs_class; -extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner, - char *name); -extern void drm_sysfs_destroy(struct drm_sysfs_class *cs); -extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, +extern struct class *drm_sysfs_create(struct module *owner, char *name); +extern void drm_sysfs_destroy(struct class *cs); +extern struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head * head); extern void drm_sysfs_device_remove(struct class_device *class_dev); diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index eba6deed..07ea91e0 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -51,7 +51,7 @@ module_param_named(cards_limit, drm_cards_limit, int, 0444); module_param_named(debug, drm_debug, int, 0600); struct drm_head 
**drm_heads; -struct drm_sysfs_class *drm_class; +struct class *drm_class; struct proc_dir_entry *drm_proc_root; static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index 1090e719..cf4349b0 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -1,3 +1,4 @@ + /* * drm_sysfs.c - Modifications to drm_sysfs_class.c to support * extra sysfs attribute from DRM. Normal drm_sysfs_class @@ -15,38 +16,8 @@ #include #include -#include "drmP.h" #include "drm_core.h" - -struct drm_sysfs_class { - struct class_device_attribute attr; - struct class class; -}; -#define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class) - -struct simple_dev { - dev_t dev; - struct class_device class_dev; -}; -#define to_simple_dev(d) container_of(d, struct simple_dev, class_dev) - -static void release_simple_dev(struct class_device *class_dev) -{ - struct simple_dev *s_dev = to_simple_dev(class_dev); - kfree(s_dev); -} - -static ssize_t show_dev(struct class_device *class_dev, char *buf) -{ - struct simple_dev *s_dev = to_simple_dev(class_dev); - return print_dev_t(buf, s_dev->dev); -} - -static void drm_sysfs_class_release(struct class *class) -{ - struct drm_sysfs_class *cs = to_drm_sysfs_class(class); - kfree(cs); -} +#include "drmP.h" /* Display the version of drm_core. This doesn't work right in current design */ static ssize_t version_show(struct class *dev, char *buf) @@ -68,42 +39,27 @@ static CLASS_ATTR(version, S_IRUGO, version_show, NULL); * Note, the pointer created here is to be destroyed when finished by making a * call to drm_sysfs_destroy(). 
*/ -struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name) +struct class *drm_sysfs_create(struct module *owner, char *name) { - struct drm_sysfs_class *cs; - int retval; + struct class *class; + int err; - cs = kmalloc(sizeof(*cs), GFP_KERNEL); - if (!cs) { - retval = -ENOMEM; - goto error; + class = class_create(owner, name); + if (IS_ERR(class)) { + err = PTR_ERR(class); + goto err_out; } - memset(cs, 0x00, sizeof(*cs)); - - cs->class.name = name; - cs->class.class_release = drm_sysfs_class_release; - cs->class.release = release_simple_dev; - - cs->attr.attr.name = "dev"; - cs->attr.attr.mode = S_IRUGO; - cs->attr.attr.owner = owner; - cs->attr.show = show_dev; - cs->attr.store = NULL; - - retval = class_register(&cs->class); - if (retval) - goto error; - retval = class_create_file(&cs->class, &class_attr_version); - if (retval) - goto error_with_class; - - return cs; - - error_with_class: - class_unregister(&cs->class); - error: - kfree(cs); - return ERR_PTR(retval); + + err = class_create_file(class, &class_attr_version); + if (err) + goto err_out_class; + + return class; + +err_out_class: + class_destroy(class); +err_out: + return ERR_PTR(err); } /** @@ -113,12 +69,13 @@ struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name) * Note, the pointer to be destroyed must have been created with a call to * drm_sysfs_create(). 
*/ -void drm_sysfs_destroy(struct drm_sysfs_class *cs) +void drm_sysfs_destroy(struct class *class) { - if ((cs == NULL) || (IS_ERR(cs))) + if ((class == NULL) || (IS_ERR(class))) return; - class_unregister(&cs->class); + class_remove_file(class, &class_attr_version); + class_destroy(class); } static ssize_t show_dri(struct class_device *class_device, char *buf) @@ -135,7 +92,7 @@ static struct class_device_attribute class_device_attrs[] = { /** * drm_sysfs_device_add - adds a class device to sysfs for a character driver - * @cs: pointer to the struct drm_sysfs_class that this device should be registered to. + * @cs: pointer to the struct class that this device should be registered to. * @dev: the dev_t for the device to be added. * @device: a pointer to a struct device that is assiociated with this class device. * @fmt: string for the class device's name @@ -144,62 +101,42 @@ static struct class_device_attribute class_device_attrs[] = { * class. A "dev" file will be created, showing the dev_t for the device. The * pointer to the struct class_device will be returned from the call. Any further * sysfs files that might be required can be created using this pointer. - * Note: the struct drm_sysfs_class passed to this function must have previously been + * Note: the struct class passed to this function must have previously been * created with a call to drm_sysfs_create(). 
*/ -struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - struct drm_head * head) +struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head) { - struct simple_dev *s_dev = NULL; - int i, retval; - - if ((cs == NULL) || (IS_ERR(cs))) { - retval = -ENODEV; - goto error; + struct class_device *class_dev; + int i, j, err; + + class_dev = class_device_create(cs, NULL, + MKDEV(DRM_MAJOR, head->minor), + &(head->dev->pdev)->dev, + "card%d", head->minor); + if (IS_ERR(class_dev)) { + err = PTR_ERR(class_dev); + goto err_out; } - s_dev = kmalloc(sizeof(*s_dev), GFP_KERNEL); - if (!s_dev) { - retval = -ENOMEM; - goto error; - } - memset(s_dev, 0x00, sizeof(*s_dev)); - - s_dev->dev = MKDEV(DRM_MAJOR, head->minor); - s_dev->class_dev.dev = &head->dev->pdev->dev; - s_dev->class_dev.class = &cs->class; - - snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor); - retval = class_device_register(&s_dev->class_dev); - if (retval) - goto error; - - retval = class_device_create_file(&s_dev->class_dev, &cs->attr); - if (retval) - goto error_with_device; - - class_set_devdata(&s_dev->class_dev, head); + class_set_devdata(class_dev, head); for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) { - retval = class_device_create_file(&s_dev->class_dev, - &class_device_attrs[i]); - if (retval) - goto error_with_files; + err = class_device_create_file(class_dev, + &class_device_attrs[i]); + if (err) + goto err_out_files; } - return &s_dev->class_dev; - - error_with_files: - while (i > 0) - class_device_remove_file(&s_dev->class_dev, - &class_device_attrs[--i]); - class_device_remove_file(&s_dev->class_dev, &cs->attr); - error_with_device: - class_device_unregister(&s_dev->class_dev); - error: - kfree(s_dev); + return class_dev; - return ERR_PTR(retval); +err_out_files: + if (i > 0) + for (j = 0; j < i; j++) + class_device_remove_file(class_dev, + &class_device_attrs[i]); + class_device_unregister(class_dev); +err_out: + return 
ERR_PTR(err); } /** @@ -211,11 +148,9 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, */ void drm_sysfs_device_remove(struct class_device *class_dev) { - struct simple_dev *s_dev = to_simple_dev(class_dev); int i; for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) - class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]); - - class_device_unregister(&s_dev->class_dev); + class_device_remove_file(class_dev, &class_device_attrs[i]); + class_device_unregister(class_dev); } -- cgit v1.2.3 From da63f4ba0f15c3ae614eba92c8219670c674727e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 22 Sep 2007 13:34:33 +0200 Subject: Add fence error member. Modify the TTM backend bind arguments. Export a number of functions needed for driver-specific super-ioctls. Add a function to map buffer objects from the kernel, regardless of where they're currently placed. A number of error fixes. --- linux-core/drm_agpsupport.c | 15 +-- linux-core/drm_bo.c | 223 +++++++++++++++++++++++++++++++------------- linux-core/drm_bo_move.c | 197 +++++++++++++++++++++++++++++++++++++- linux-core/drm_fence.c | 154 +++++++++++++++++------------- linux-core/drm_object.c | 4 + linux-core/drm_objects.h | 146 +++++++++++++++++++++++------ linux-core/drm_ttm.c | 12 +-- linux-core/i915_buffer.c | 9 +- linux-core/i915_fence.c | 7 +- linux-core/nouveau_sgdma.c | 16 ++-- linux-core/via_buffer.c | 3 +- linux-core/via_fence.c | 3 +- linux-core/xgi_fence.c | 2 +- 13 files changed, 602 insertions(+), 189 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index 4618823c..b68efc64 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -535,23 +535,23 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p } static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, - unsigned long offset, - int cached) + struct drm_bo_mem_reg *bo_mem) { - struct 
drm_agp_ttm_backend *agp_be = + struct drm_agp_ttm_backend *agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; int ret; DRM_DEBUG("drm_agp_bind_ttm\n"); mem->is_flushed = TRUE; - mem->type = (cached) ? AGP_USER_CACHED_MEMORY : + mem->type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; - ret = drm_agp_bind_memory(mem, offset); + ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start); if (ret) { DRM_ERROR("AGP Bind memory failed\n"); } - DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0, + DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ? + DRM_BE_FLAG_BOUND_CACHED : 0, DRM_BE_FLAG_BOUND_CACHED); return ret; } @@ -643,7 +643,8 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) agp_be->bridge = dev->agp->bridge; agp_be->populated = FALSE; agp_be->backend.func = &agp_ttm_backend; - agp_be->backend.mem_type = DRM_BO_MEM_TT; + // agp_be->backend.mem_type = DRM_BO_MEM_TT; + agp_be->backend.dev = dev; return &agp_be->backend; } diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b46d0361..1913df44 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -142,12 +142,8 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo) switch (bo->type) { case drm_bo_type_dc: - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); - if (!bo->ttm) - ret = -ENOMEM; - break; case drm_bo_type_kernel: - bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT); + bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); if (!bo->ttm) ret = -ENOMEM; break; @@ -175,7 +171,8 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; int ret = 0; - if (old_is_pci || new_is_pci) + if (old_is_pci || new_is_pci || + ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED)) ret = drm_bo_vm_pre_move(bo, old_is_pci); if (ret) return ret; @@ -190,9 +187,7 @@ 
static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, goto out_err; if (mem->mem_type != DRM_BO_MEM_LOCAL) { - ret = drm_bind_ttm(bo->ttm, new_man->flags & - DRM_BO_FLAG_CACHED, - mem->mm_node->start); + ret = drm_bind_ttm(bo->ttm, mem); if (ret) goto out_err; } @@ -242,7 +237,9 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, _DRM_BO_FLAG_EVICTED); if (bo->mem.mm_node) - bo->offset = bo->mem.mm_node->start << PAGE_SHIFT; + bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + + bm->man[bo->mem.mem_type].gpu_offset; + return 0; @@ -290,6 +287,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, } return 0; } +EXPORT_SYMBOL(drm_bo_wait); static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) { @@ -417,7 +415,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo) atomic_dec(&bm->count); - BUG_ON(!list_empty(&bo->base.list)); + // BUG_ON(!list_empty(&bo->base.list)); drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ); return; @@ -503,6 +501,7 @@ void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) drm_bo_destroy_locked(tmp_bo); } } +EXPORT_SYMBOL(drm_bo_usage_deref_locked); static void drm_bo_base_deref_locked(struct drm_file * file_priv, struct drm_user_object * uo) @@ -531,38 +530,76 @@ void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) } EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); +void drm_putback_buffer_objects(struct drm_device *dev) +{ + struct drm_buffer_manager *bm = &dev->bm; + struct list_head *list = &bm->unfenced; + struct drm_buffer_object *entry, *next; + + mutex_lock(&dev->struct_mutex); + list_for_each_entry_safe(entry, next, list, lru) { + atomic_inc(&entry->usage); + mutex_unlock(&dev->struct_mutex); + + mutex_lock(&entry->mutex); + BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); + mutex_lock(&dev->struct_mutex); + + list_del_init(&entry->lru); + DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); + 
DRM_WAKEUP(&entry->event_queue); + + /* + * FIXME: Might want to put back on head of list + * instead of tail here. + */ + + drm_bo_add_to_lru(entry); + mutex_unlock(&entry->mutex); + drm_bo_usage_deref_locked(&entry); + } + mutex_unlock(&dev->struct_mutex); +} +EXPORT_SYMBOL(drm_putback_buffer_objects); + + /* * Note. The caller has to register (if applicable) * and deregister fence object usage. */ -int drm_fence_buffer_objects(struct drm_file * file_priv, +int drm_fence_buffer_objects(struct drm_device *dev, struct list_head *list, uint32_t fence_flags, struct drm_fence_object * fence, struct drm_fence_object ** used_fence) { - struct drm_device *dev = file_priv->head->dev; struct drm_buffer_manager *bm = &dev->bm; - struct drm_buffer_object *entry; uint32_t fence_type = 0; + uint32_t fence_class = ~0; int count = 0; int ret = 0; struct list_head *l; - LIST_HEAD(f_list); mutex_lock(&dev->struct_mutex); if (!list) list = &bm->unfenced; + if (fence) + fence_class = fence->class; + list_for_each_entry(entry, list, lru) { BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); - fence_type |= entry->fence_type; - if (entry->fence_class != 0) { - DRM_ERROR("Fence class %d is not implemented yet.\n", - entry->fence_class); + fence_type |= entry->new_fence_type; + if (fence_class == ~0) + fence_class = entry->new_fence_class; + else if (entry->new_fence_class != fence_class) { + DRM_ERROR("Unmatching fence classes on unfenced list: " + "%d and %d.\n", + fence_class, + entry->new_fence_class); ret = -EINVAL; goto out; } @@ -574,14 +611,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, goto out; } - /* - * Transfer to a local list before we release the dev->struct_mutex; - * This is so we don't get any new unfenced objects while fencing - * the ones we already have.. 
- */ - - list_splice_init(list, &f_list); - if (fence) { if ((fence_type & fence->type) != fence_type) { DRM_ERROR("Given fence doesn't match buffers " @@ -591,7 +620,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, } } else { mutex_unlock(&dev->struct_mutex); - ret = drm_fence_object_create(dev, 0, fence_type, + ret = drm_fence_object_create(dev, fence_class, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &fence); mutex_lock(&dev->struct_mutex); @@ -600,8 +629,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, } count = 0; - l = f_list.next; - while (l != &f_list) { + l = list->next; + while (l != list) { prefetch(l->next); entry = list_entry(l, struct drm_buffer_object, lru); atomic_inc(&entry->usage); @@ -614,6 +643,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, if (entry->fence) drm_fence_usage_deref_locked(&entry->fence); entry->fence = drm_fence_reference_locked(fence); + entry->fence_class = entry->new_fence_class; + entry->fence_type = entry->new_fence_type; DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); DRM_WAKEUP(&entry->event_queue); @@ -621,7 +652,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, } mutex_unlock(&entry->mutex); drm_bo_usage_deref_locked(&entry); - l = f_list.next; + l = list->next; } DRM_DEBUG("Fenced %d buffers\n", count); out: @@ -629,7 +660,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, *used_fence = fence; return ret; } - EXPORT_SYMBOL(drm_fence_buffer_objects); /* @@ -944,6 +974,7 @@ struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, atomic_inc(&bo->usage); return bo; } +EXPORT_SYMBOL(drm_lookup_buffer_object); /* * Call bo->mutex locked. 
@@ -1079,9 +1110,12 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, struct drm_bo_info_rep *rep) { + if (!rep) + return; + rep->handle = bo->base.hash.key; rep->flags = bo->mem.flags; - rep->size = bo->mem.num_pages * PAGE_SIZE; + rep->size = bo->num_pages * PAGE_SIZE; rep->offset = bo->offset; rep->arg_handle = bo->map_list.user_token; rep->mask = bo->mem.mask; @@ -1260,7 +1294,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, if (ret) return ret; - mem.num_pages = bo->mem.num_pages; + mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.mask = new_mem_flags; mem.page_alignment = bo->mem.page_alignment; @@ -1308,7 +1342,7 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) return 0; if ((flag_diff & DRM_BO_FLAG_CACHED) && - (!(mem->mask & DRM_BO_FLAG_CACHED) || + (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/ (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) { return 0; } @@ -1375,7 +1409,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, (unsigned long long) bo->mem.mask, (unsigned long long) bo->mem.flags); - ret = driver->fence_type(bo, &ftype); + ret = driver->fence_type(bo, &fence_class, &ftype); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); @@ -1404,13 +1438,15 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, return ret; } - - bo->fence_class = fence_class; - bo->fence_type = ftype; + + bo->new_fence_class = fence_class; + bo->new_fence_type = ftype; + ret = drm_bo_wait_unmapped(bo, no_wait); - if (ret) + if (ret) { + DRM_ERROR("Timed out waiting for buffer unmap.\n"); return ret; - + } if (bo->type == drm_bo_type_fake) { ret = drm_bo_check_fake(dev, &bo->mem); if (ret) @@ -1465,23 +1501,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, return 0; } -static int 
drm_bo_handle_validate(struct drm_file *file_priv, - uint32_t handle, - uint32_t fence_class, - uint64_t flags, uint64_t mask, uint32_t hint, - struct drm_bo_info_rep *rep) +int drm_bo_do_validate(struct drm_buffer_object *bo, + uint64_t flags, uint64_t mask, uint32_t hint, + uint32_t fence_class, + int no_wait, + struct drm_bo_info_rep *rep) { - struct drm_device *dev = file_priv->head->dev; - struct drm_buffer_object *bo; int ret; - int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; - - mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(file_priv, handle, 1); - mutex_unlock(&dev->struct_mutex); - if (!bo) { - return -EINVAL; - } mutex_lock(&bo->mutex); ret = drm_bo_wait_unfenced(bo, no_wait, 0); @@ -1489,24 +1515,63 @@ static int drm_bo_handle_validate(struct drm_file *file_priv, if (ret) goto out; + if ((mask & flags & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { + DRM_ERROR + ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " + "processes\n"); + return -EPERM; + } + + DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); ret = drm_bo_new_mask(bo, flags, hint); if (ret) goto out; - ret = - drm_buffer_object_validate(bo, fence_class, - !(hint & DRM_BO_HINT_DONT_FENCE), - no_wait); - drm_bo_fill_rep_arg(bo, rep); - - out: + ret = drm_buffer_object_validate(bo, + fence_class, + !(hint & DRM_BO_HINT_DONT_FENCE), + no_wait); +out: + if (rep) + drm_bo_fill_rep_arg(bo, rep); mutex_unlock(&bo->mutex); + return ret; +} +EXPORT_SYMBOL(drm_bo_do_validate); + + +int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, + uint32_t fence_class, + uint64_t flags, uint64_t mask, uint32_t hint, + struct drm_bo_info_rep * rep, + struct drm_buffer_object **bo_rep) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *bo; + int ret; + int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; + + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(file_priv, handle, 1); + mutex_unlock(&dev->struct_mutex); + + if (!bo) { + return 
-EINVAL; + } + + ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, + no_wait, rep); + + if (!ret && bo_rep) + *bo_rep = bo; + else + drm_bo_usage_deref_unlocked(&bo); - drm_bo_usage_deref_unlocked(&bo); return ret; } +EXPORT_SYMBOL(drm_bo_handle_validate); /** * Fills out the generic buffer object ioctl reply with the information for @@ -1612,8 +1677,9 @@ int drm_buffer_object_create(struct drm_device *dev, #endif bo->dev = dev; bo->type = type; + bo->num_pages = num_pages; bo->mem.mem_type = DRM_BO_MEM_LOCAL; - bo->mem.num_pages = num_pages; + bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; if (bo->type == drm_bo_type_fake) { @@ -1706,6 +1772,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr struct drm_bo_op_arg *arg = data; struct drm_bo_op_req *req = &arg->d.req; struct drm_bo_info_rep rep; + struct drm_buffer_object *dummy; unsigned long next = 0; void __user *curuserarg = NULL; int ret; @@ -1742,7 +1809,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr req->bo_req.flags, req->bo_req.mask, req->bo_req.hint, - &rep); + &rep, &dummy); break; case drm_bo_fence: ret = -EINVAL; @@ -2092,9 +2159,30 @@ static void drm_bo_clean_unfenced(struct drm_device *dev) struct drm_buffer_manager *bm = &dev->bm; struct list_head *head, *list; struct drm_buffer_object *entry; + struct drm_fence_object *fence; head = &bm->unfenced; + if (list_empty(head)) + return; + + DRM_ERROR("Clean unfenced\n"); + + if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) { + + /* + * Fixme: Should really wait here. 
+ */ + } + + if (fence) + drm_fence_usage_deref_locked(&fence); + + if (list_empty(head)) + return; + + DRM_ERROR("Really clean unfenced\n"); + list = head->next; while(list != head) { prefetch(list->next); @@ -2254,7 +2342,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) if (!man->has_type) { DRM_ERROR("Trying to take down uninitialized " - "memory manager type\n"); + "memory manager type %u\n", mem_type); return ret; } man->use_type = 0; @@ -2276,6 +2364,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) return ret; } +EXPORT_SYMBOL(drm_bo_clean_mm); /** *Evict all buffers of a particular mem_type, but leave memory manager diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 1a613916..dae99181 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -71,9 +71,7 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo, save_flags = old_mem->flags; } if (new_mem->mem_type != DRM_BO_MEM_LOCAL) { - ret = drm_bind_ttm(ttm, - new_mem->flags & DRM_BO_FLAG_CACHED, - new_mem->mm_node->start); + ret = drm_bind_ttm(ttm, new_mem); if (ret) return ret; } @@ -344,6 +342,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, ret = drm_fence_object_create(dev, fence_class, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &bo->fence); + bo->fence_type = fence_type; if (ret) return ret; @@ -410,3 +409,195 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, } EXPORT_SYMBOL(drm_bo_move_accel_cleanup); + +int drm_bo_same_page(unsigned long offset, + unsigned long offset2) +{ + return (offset & PAGE_MASK) == (offset2 & PAGE_MASK); +} +EXPORT_SYMBOL(drm_bo_same_page); + +unsigned long drm_bo_offset_end(unsigned long offset, + unsigned long end) +{ + + offset = (offset + PAGE_SIZE) & PAGE_MASK; + return (end < offset) ? 
end : offset; +} +EXPORT_SYMBOL(drm_bo_offset_end); + + +static pgprot_t drm_kernel_io_prot(uint32_t map_type) +{ + pgprot_t tmp = PAGE_KERNEL; + +#if defined(__i386__) || defined(__x86_64__) +#ifdef USE_PAT_WC +#warning using pat + if (drm_use_pat() && map_type == _DRM_TTM) { + pgprot_val(tmp) |= _PAGE_PAT; + return tmp; + } +#endif + if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) { + pgprot_val(tmp) |= _PAGE_PCD; + pgprot_val(tmp) &= ~_PAGE_PWT; + } +#elif defined(__powerpc__) + pgprot_val(tmp) |= _PAGE_NO_CACHE; + if (map_type == _DRM_REGISTERS) + pgprot_val(tmp) |= _PAGE_GUARDED; +#endif +#if defined(__ia64__) + if (map_type == _DRM_TTM) + tmp = pgprot_writecombine(tmp); + else + tmp = pgprot_noncached(tmp); +#endif + return tmp; +} + +static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base, + unsigned long bus_offset, unsigned long bus_size, + struct drm_bo_kmap_obj *map) +{ + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg *mem = &bo->mem; + struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; + + if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { + map->bo_kmap_type = bo_map_premapped; + map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset); + } else { + map->bo_kmap_type = bo_map_iomap; + map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size); + } + return (!map->virtual) ? 
-ENOMEM : 0; +} + +static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct drm_bo_kmap_obj *map) +{ + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg *mem = &bo->mem; + struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type]; + pgprot_t prot; + struct drm_ttm *ttm = bo->ttm; + struct page *d; + int i; + + BUG_ON(!ttm); + + /* + * Populate the part we're mapping; + */ + + for (i=start_page; i< num_pages; ++i) { + d = drm_ttm_get_page(ttm, i); + if (!d) + return -ENOMEM; + } + + if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) { + + /* + * We're mapping a single page, and the desired + * page protection is consistent with the bo. + */ + + map->bo_kmap_type = bo_map_kmap; + map->page = drm_ttm_get_page(ttm, start_page); + map->virtual = kmap(map->page); + } else { + + /* + * We need to use vmap to get the desired page protection + * or to make the buffer object look contigous. + */ + + prot = (mem->flags & DRM_BO_FLAG_CACHED) ? + PAGE_KERNEL : + drm_kernel_io_prot(man->drm_bus_maptype); + map->bo_kmap_type = bo_map_vmap; + map->virtual = vmap(ttm->pages + start_page, + num_pages, 0, prot); + } + return (!map->virtual) ? -ENOMEM : 0; +} + +/* + * This function is to be used for kernel mapping of buffer objects. + * It chooses the appropriate mapping method depending on the memory type + * and caching policy the buffer currently has. + * Mapping multiple pages or buffers that live in io memory is a bit slow and + * consumes vmalloc space. Be restrictive with such mappings. + * Mapping single pages usually returns the logical kernel address, (which is fast) + * BUG may use slower temporary mappings for high memory pages or + * uncached / write-combined pages. + * + * The function fills in a drm_bo_kmap_obj which can be used to return the + * kernel virtual address of the buffer. + * + * Code servicing a non-priviliged user request is only allowed to map one + * page at a time. 
We might need to implement a better scheme to stop such + * processes from consuming all vmalloc space. + */ + +int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct drm_bo_kmap_obj *map) +{ + int ret; + unsigned long bus_base; + unsigned long bus_offset; + unsigned long bus_size; + + map->virtual = NULL; + + if (num_pages > bo->num_pages) + return -EINVAL; + if (start_page > bo->num_pages) + return -EINVAL; +#if 0 + if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) + return -EPERM; +#endif + ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, + &bus_offset, &bus_size); + + if (ret) + return ret; + + if (bus_size == 0) { + return drm_bo_kmap_ttm(bo, start_page, num_pages, map); + } else { + bus_offset += start_page << PAGE_SHIFT; + bus_size = num_pages << PAGE_SHIFT; + return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map); + } +} +EXPORT_SYMBOL(drm_bo_kmap); + +void drm_bo_kunmap(struct drm_bo_kmap_obj *map) +{ + if (!map->virtual) + return; + + switch(map->bo_kmap_type) { + case bo_map_iomap: + iounmap(map->virtual); + break; + case bo_map_vmap: + vunmap(map->virtual); + break; + case bo_map_kmap: + kunmap(map->page); + break; + case bo_map_premapped: + break; + default: + BUG(); + } + map->virtual = NULL; + map->page = NULL; +} +EXPORT_SYMBOL(drm_bo_kunmap); diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 2f16f7ef..d228547c 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -35,7 +35,7 @@ */ void drm_fence_handler(struct drm_device * dev, uint32_t class, - uint32_t sequence, uint32_t type) + uint32_t sequence, uint32_t type, uint32_t error) { int wake = 0; uint32_t diff; @@ -49,6 +49,7 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class, int is_exe = (type & DRM_FENCE_TYPE_EXE); int ge_last_exe; + diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask; if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff) @@ -57,9 +58,6 @@ void 
drm_fence_handler(struct drm_device * dev, uint32_t class, diff = (sequence - fc->last_exe_flush) & driver->sequence_mask; ge_last_exe = diff < driver->wrap_diff; - if (ge_last_exe) - fc->pending_flush &= ~type; - if (is_exe && ge_last_exe) { fc->last_exe_flush = sequence; } @@ -75,36 +73,66 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class, } } + fc->pending_flush &= ~type; head = (found) ? &fence->ring : &fc->ring; list_for_each_entry_safe_reverse(fence, next, head, ring) { if (&fence->ring == &fc->ring) break; + if (error) { + fence->error = error; + fence->signaled = fence->type; + fence->submitted_flush = fence->type; + fence->flush_mask = fence->type; + list_del_init(&fence->ring); + wake = 1; + break; + } + type |= fence->native_type; relevant = type & fence->type; if ((fence->signaled | relevant) != fence->signaled) { fence->signaled |= relevant; + fence->flush_mask |= relevant; + fence->submitted_flush |= relevant; DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n", fence->base.hash.key, fence->signaled); - fence->submitted_flush |= relevant; wake = 1; } relevant = fence->flush_mask & - ~(fence->signaled | fence->submitted_flush); + ~(fence->submitted_flush | fence->signaled); - if (relevant) { - fc->pending_flush |= relevant; - fence->submitted_flush = fence->flush_mask; - } + fc->pending_flush |= relevant; + fence->submitted_flush |= relevant; if (!(fence->type & ~fence->signaled)) { DRM_DEBUG("Fence completely signaled 0x%08lx\n", fence->base.hash.key); list_del_init(&fence->ring); } + + } + + /* + * Reinstate lost flush flags. 
+ */ + + if ((fc->pending_flush & type) != type) { + head = head->prev; + list_for_each_entry(fence, head, ring) { + if (&fence->ring == &fc->ring) + break; + diff = (fc->last_exe_flush - fence->sequence) & + driver->sequence_mask; + if (diff > driver->wrap_diff) + break; + + relevant = fence->submitted_flush & ~fence->signaled; + fc->pending_flush |= relevant; + } } if (wake) { @@ -141,6 +169,7 @@ void drm_fence_usage_deref_locked(struct drm_fence_object ** fence) drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE); } } +EXPORT_SYMBOL(drm_fence_usage_deref_locked); void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) { @@ -160,6 +189,7 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) mutex_unlock(&dev->struct_mutex); } } +EXPORT_SYMBOL(drm_fence_usage_deref_unlocked); struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src) @@ -178,7 +208,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, atomic_inc(&src->usage); mutex_unlock(&src->dev->struct_mutex); } - +EXPORT_SYMBOL(drm_fence_reference_unlocked); static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base) { @@ -206,6 +236,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence, return signaled; } +EXPORT_SYMBOL(drm_fence_object_signaled); static void drm_fence_flush_exe(struct drm_fence_class_manager * fc, struct drm_fence_driver * driver, uint32_t sequence) @@ -241,7 +272,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence, write_lock_irqsave(&fm->lock, flags); fence->flush_mask |= type; - if (fence->submitted_flush == fence->signaled) { + if ((fence->submitted_flush & fence->signaled) + == fence->submitted_flush) { if ((fence->type & DRM_FENCE_TYPE_EXE) && !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) { drm_fence_flush_exe(fc, driver, fence->sequence); @@ -329,7 +361,15 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence, if (ret == -EBUSY) { 
DRM_ERROR("Fence timeout. " "GPU lockup or fence driver was " - "taken down.\n"); + "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n", + fence->class, + fence->sequence, + fence->type, + mask, + fence->signaled); + DRM_ERROR("Pending exe flush %d 0x%08x\n", + fc->pending_exe_flush, + fc->exe_flush_sequence); } return ((ret == -EINTR) ? -EAGAIN : ret); } @@ -348,6 +388,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence, if (mask & ~fence->type) { DRM_ERROR("Wait trying to extend fence type" " 0x%08x 0x%08x\n", mask, fence->type); + BUG(); return -EINVAL; } @@ -402,6 +443,8 @@ int drm_fence_object_wait(struct drm_fence_object * fence, return 0; } +EXPORT_SYMBOL(drm_fence_object_wait); + int drm_fence_object_emit(struct drm_fence_object * fence, uint32_t fence_flags, uint32_t class, uint32_t type) @@ -434,6 +477,7 @@ int drm_fence_object_emit(struct drm_fence_object * fence, write_unlock_irqrestore(&fm->lock, flags); return 0; } +EXPORT_SYMBOL(drm_fence_object_emit); static int drm_fence_object_init(struct drm_device * dev, uint32_t class, uint32_t type, @@ -545,6 +589,23 @@ void drm_fence_manager_init(struct drm_device * dev) write_unlock_irqrestore(&fm->lock, flags); } +void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg) +{ + struct drm_device *dev = fence->dev; + struct drm_fence_manager *fm = &dev->fm; + unsigned long irq_flags; + + read_lock_irqsave(&fm->lock, irq_flags); + arg->handle = fence->base.hash.key; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; + arg->error = fence->error; + read_unlock_irqrestore(&fm->lock, irq_flags); +} +EXPORT_SYMBOL(drm_fence_fill_arg); + + void drm_fence_manager_takedown(struct drm_device * dev) { } @@ -572,7 +633,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if 
(!fm->initialized) { @@ -597,14 +657,10 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * /* * usage > 0. No need to lock dev->struct_mutex; */ - + arg->handle = fence->base.hash.key; - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -642,7 +698,6 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil struct drm_fence_arg *arg = data; struct drm_fence_object *fence; struct drm_user_object *uo; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -654,12 +709,7 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil if (ret) return ret; fence = drm_lookup_fence_object(file_priv, arg->handle); - - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -687,7 +737,6 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -699,11 +748,7 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file if (!fence) return -EINVAL; - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -715,7 +760,6 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = 
data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -728,11 +772,7 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f return -EINVAL; ret = drm_fence_object_flush(fence, arg->type); - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -745,7 +785,6 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -760,11 +799,7 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi arg->flags & DRM_FENCE_FLAG_WAIT_LAZY, 0, arg->type); - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -777,7 +812,6 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -792,11 +826,7 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi ret = drm_fence_object_emit(fence, arg->flags, arg->class, arg->type); - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; @@ -808,7 +838,6 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file 
struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg *arg = data; struct drm_fence_object *fence; - unsigned long flags; ret = 0; if (!fm->initialized) { @@ -821,23 +850,22 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file return -EINVAL; } LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags, + ret = drm_fence_buffer_objects(dev, NULL, arg->flags, NULL, &fence); if (ret) return ret; - ret = drm_fence_add_user_object(file_priv, fence, - arg->flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) - return ret; + + if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) { + ret = drm_fence_add_user_object(file_priv, fence, + arg->flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) + return ret; + } arg->handle = fence->base.hash.key; - read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; - arg->type = fence->type; - arg->signaled = fence->signaled; - read_unlock_irqrestore(&fm->lock, flags); + drm_fence_fill_arg(fence, arg); drm_fence_usage_deref_unlocked(&fence); return ret; diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 3d866333..6bd89b1d 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -50,6 +50,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, list_add_tail(&item->list, &priv->user_objects); return 0; } +EXPORT_SYMBOL(drm_add_user_object); struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key) { @@ -76,6 +77,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t } return item; } +EXPORT_SYMBOL(drm_lookup_user_object); static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item) { @@ -104,6 +106,7 @@ int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item drm_deref_user_object(priv, item); return 0; } +EXPORT_SYMBOL(drm_remove_user_object); static int drm_object_ref_action(struct drm_file * priv, 
struct drm_user_object * ro, enum drm_ref_type action) @@ -196,6 +199,7 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, return drm_hash_entry(hash, struct drm_ref_object, hash); } +EXPORT_SYMBOL(drm_lookup_ref_object); static void drm_remove_other_references(struct drm_file * priv, struct drm_user_object * ro) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 096041d7..25072dbe 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -32,6 +32,7 @@ #define _DRM_OBJECTS_H struct drm_device; +struct drm_bo_mem_reg; /*************************************************** * User space objects. (drm_object.c) @@ -42,10 +43,14 @@ struct drm_device; enum drm_object_type { drm_fence_type, drm_buffer_type, - drm_ttm_type /* * Add other user space object types here. */ + drm_driver_type0 = 256, + drm_driver_type1, + drm_driver_type2, + drm_driver_type3, + drm_driver_type4 }; /* @@ -156,6 +161,7 @@ struct drm_fence_object { uint32_t sequence; uint32_t flush_mask; uint32_t submitted_flush; + uint32_t error; }; #define _DRM_FENCE_CLASSES 8 @@ -192,7 +198,7 @@ struct drm_fence_driver { }; extern void drm_fence_handler(struct drm_device *dev, uint32_t class, - uint32_t sequence, uint32_t type); + uint32_t sequence, uint32_t type, uint32_t error); extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, @@ -210,6 +216,12 @@ extern int drm_fence_object_wait(struct drm_fence_object * fence, extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, struct drm_fence_object ** c_fence); +extern int drm_fence_object_emit(struct drm_fence_object * fence, + uint32_t fence_flags, uint32_t class, + uint32_t type); +extern void drm_fence_fill_arg(struct drm_fence_object *fence, + struct drm_fence_arg *arg); + extern int 
drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable); @@ -258,23 +270,22 @@ struct drm_ttm_backend_func { unsigned long num_pages, struct page ** pages); void (*clear) (struct drm_ttm_backend * backend); int (*bind) (struct drm_ttm_backend * backend, - unsigned long offset, int cached); + struct drm_bo_mem_reg * bo_mem); int (*unbind) (struct drm_ttm_backend * backend); void (*destroy) (struct drm_ttm_backend * backend); }; -struct drm_ttm_backend { - uint32_t flags; - int mem_type; - struct drm_ttm_backend_func *func; -}; +typedef struct drm_ttm_backend { + struct drm_device *dev; + uint32_t flags; + struct drm_ttm_backend_func *func; +} drm_ttm_backend_t; struct drm_ttm { struct page **pages; uint32_t page_flags; unsigned long num_pages; - unsigned long aper_offset; atomic_t vma_count; struct drm_device *dev; int destroy; @@ -290,11 +301,13 @@ struct drm_ttm { }; extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset); +extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem); extern void drm_ttm_unbind(struct drm_ttm * ttm); extern void drm_ttm_evict(struct drm_ttm * ttm); extern void drm_ttm_fixup_caching(struct drm_ttm * ttm); extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index); +extern void drm_ttm_cache_flush(void); +extern int drm_ttm_populate(struct drm_ttm * ttm); /* * Destroy a ttm. 
The user normally calls drmRmMap or a similar IOCTL to do this, @@ -333,6 +346,8 @@ struct drm_bo_mem_reg { uint32_t mem_type; uint64_t flags; uint64_t mask; + uint32_t desired_tile_stride; + uint32_t hw_tile_stride; }; struct drm_buffer_object { @@ -356,10 +371,13 @@ struct drm_buffer_object { uint32_t fence_type; uint32_t fence_class; + uint32_t new_fence_type; + uint32_t new_fence_class; struct drm_fence_object *fence; uint32_t priv_flags; wait_queue_head_t event_queue; struct mutex mutex; + unsigned long num_pages; /* For pinned buffers */ int pinned; @@ -368,7 +386,6 @@ struct drm_buffer_object { struct list_head pinned_lru; /* For vm */ - struct drm_ttm *ttm; struct drm_map_list map_list; uint32_t memory_type; @@ -395,6 +412,7 @@ struct drm_mem_type_manager { struct list_head pinned; uint32_t flags; uint32_t drm_bus_maptype; + unsigned long gpu_offset; unsigned long io_offset; unsigned long io_size; void *io_addr; @@ -434,7 +452,8 @@ struct drm_bo_driver { uint32_t num_mem_busy_prio; struct drm_ttm_backend *(*create_ttm_backend_entry) (struct drm_device * dev); - int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type); + int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass, + uint32_t * type); int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); int (*init_mem_type) (struct drm_device * dev, uint32_t type, struct drm_mem_type_manager * man); @@ -472,32 +491,44 @@ extern int drm_bo_pci_offset(struct drm_device *dev, extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem); extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); -extern int drm_fence_buffer_objects(struct drm_file * priv, +extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo); +extern void drm_putback_buffer_objects(struct drm_device *dev); +extern int drm_fence_buffer_objects(struct drm_device * dev, struct list_head *list, uint32_t fence_flags, struct drm_fence_object * fence, struct 
drm_fence_object ** used_fence); extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); +extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, + enum drm_bo_type type, uint64_t mask, + uint32_t hint, uint32_t page_alignment, + unsigned long buffer_start, + struct drm_buffer_object **bo); extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait); extern int drm_bo_mem_space(struct drm_buffer_object * bo, struct drm_bo_mem_reg * mem, int no_wait); extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced); -extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, - enum drm_bo_type type, uint64_t mask, - uint32_t hint, uint32_t page_alignment, - unsigned long buffer_start, - struct drm_buffer_object **bo); -extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, +extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type); +extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size); -extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type); -extern int drm_bo_add_user_object(struct drm_file *file_priv, - struct drm_buffer_object *bo, int sharable); -extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); +extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, + uint32_t fence_class, uint64_t flags, + uint64_t mask, uint32_t hint, + struct drm_bo_info_rep * rep, + struct drm_buffer_object **bo_rep); +extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv, + uint32_t handle, + int check_owner); +extern int drm_bo_do_validate(struct drm_buffer_object *bo, + uint64_t flags, uint64_t mask, uint32_t hint, + uint32_t fence_class, + int no_wait, + struct drm_bo_info_rep *rep); /* - * Buffer object memory move helpers. + * Buffer object memory move- and map helpers. 
* drm_bo_move.c */ @@ -513,11 +544,69 @@ extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, uint32_t fence_type, uint32_t fence_flags, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_same_page(unsigned long offset, unsigned long offset2); +extern unsigned long drm_bo_offset_end(unsigned long offset, + unsigned long end); -extern int drm_mem_reg_ioremap(struct drm_device *dev, - struct drm_bo_mem_reg *mem, void **virtual); -extern void drm_mem_reg_iounmap(struct drm_device *dev, - struct drm_bo_mem_reg *mem, void *virtual); +struct drm_bo_kmap_obj { + void *virtual; + struct page *page; + enum { + bo_map_iomap, + bo_map_vmap, + bo_map_kmap, + bo_map_premapped, + } bo_kmap_type; +}; + +static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem) +{ + *is_iomem = (map->bo_kmap_type == bo_map_iomap || + map->bo_kmap_type == bo_map_premapped); + return map->virtual; +} +extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map); +extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct drm_bo_kmap_obj *map); + + +/* + * drm_regman.c + */ + +struct drm_reg { + struct list_head head; + struct drm_fence_object *fence; + uint32_t fence_type; + uint32_t new_fence_type; +}; + +struct drm_reg_manager { + struct list_head free; + struct list_head lru; + struct list_head unfenced; + + int (*reg_reusable)(const struct drm_reg *reg, const void *data); + void (*reg_destroy)(struct drm_reg *reg); +}; + +extern int drm_regs_alloc(struct drm_reg_manager *manager, + const void *data, + uint32_t fence_class, + uint32_t fence_type, + int interruptible, + int no_wait, + struct drm_reg **reg); + +extern void drm_regs_fence(struct drm_reg_manager *regs, + struct drm_fence_object *fence); + +extern void drm_regs_free(struct drm_reg_manager *manager); +extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg); +extern void drm_regs_init(struct drm_reg_manager *manager, + int 
(*reg_reusable)(const struct drm_reg *, + const void *), + void (*reg_destroy)(struct drm_reg *)); #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ @@ -526,5 +615,4 @@ extern void drm_mem_reg_iounmap(struct drm_device *dev, #else #define DRM_ASSERT_LOCKED(_mutex) #endif - #endif diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 60c64cba..33bbe1d4 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -35,11 +35,12 @@ static void drm_ttm_ipi_handler(void *null) flush_agp_cache(); } -static void drm_ttm_cache_flush(void) +void drm_ttm_cache_flush(void) { if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0) DRM_ERROR("Timed out waiting for drm cache flush.\n"); } +EXPORT_SYMBOL(drm_ttm_cache_flush); /* * Use kmalloc if possible. Otherwise fall back to vmalloc. @@ -207,7 +208,7 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index) return p; } -static int drm_ttm_populate(struct drm_ttm * ttm) +int drm_ttm_populate(struct drm_ttm * ttm) { struct page *page; unsigned long i; @@ -308,7 +309,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm) drm_ttm_fixup_caching(ttm); } -int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset) +int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) { int ret = 0; @@ -325,17 +326,16 @@ int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset) if (ret) return ret; - if (ttm->state == ttm_unbound && !cached) { + if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) { drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); } - if ((ret = be->func->bind(be, aper_offset, cached))) { + if ((ret = be->func->bind(be, bo_mem))) { ttm->state = ttm_evicted; DRM_ERROR("Couldn't bind backend.\n"); return ret; } - ttm->aper_offset = aper_offset; ttm->state = ttm_bound; return 0; diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index bf500cc6..75763e71 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -38,7 
+38,9 @@ struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev) return drm_agp_init_ttm(dev); } -int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type) +int i915_fence_types(struct drm_buffer_object *bo, + uint32_t * fclass, + uint32_t * type) { if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; @@ -71,6 +73,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type, man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CACHED; man->drm_bus_maptype = 0; + man->gpu_offset = 0; break; case DRM_BO_MEM_TT: if (!(drm_core_has_AGP(dev) && dev->agp)) { @@ -84,6 +87,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type, man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; man->drm_bus_maptype = _DRM_AGP; + man->gpu_offset = 0; break; case DRM_BO_MEM_PRIV0: if (!(drm_core_has_AGP(dev) && dev->agp)) { @@ -97,6 +101,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type, man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP; man->drm_bus_maptype = _DRM_AGP; + man->gpu_offset = 0; break; default: DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); @@ -196,7 +201,7 @@ static int i915_move_flip(struct drm_buffer_object * bo, if (ret) return ret; - ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start); + ret = drm_bind_ttm(bo->ttm, &tmp_mem); if (ret) goto out_cleanup; diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 6f0de2ca..5a1653e9 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -63,7 +63,8 @@ static void i915_perform_flush(struct drm_device * dev) diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK; if (diff < driver->wrap_diff && diff != 0) { - drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE); + drm_fence_handler(dev, 0, sequence, + DRM_FENCE_TYPE_EXE, 0); } if (dev_priv->fence_irq_on && !fc->pending_exe_flush) { @@ -82,7 +83,7 @@ static 
void i915_perform_flush(struct drm_device * dev) flush_flags = dev_priv->flush_flags; flush_sequence = dev_priv->flush_sequence; dev_priv->flush_pending = 0; - drm_fence_handler(dev, 0, flush_sequence, flush_flags); + drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0); } } @@ -103,7 +104,7 @@ static void i915_perform_flush(struct drm_device * dev) flush_flags = dev_priv->flush_flags; flush_sequence = dev_priv->flush_sequence; dev_priv->flush_pending = 0; - drm_fence_handler(dev, 0, flush_sequence, flush_flags); + drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0); } } diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index 97d5330b..b86c5d7c 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -80,16 +80,16 @@ nouveau_sgdma_clear(struct drm_ttm_backend *be) } static int -nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, - int cached) +nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem) { struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; - uint64_t offset = (pg_start << PAGE_SHIFT); + uint64_t offset = (mem->mm_node->start << PAGE_SHIFT); uint32_t i; - DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached); + DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start, + offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1); if (offset & NV_CTXDMA_PAGE_MASK) return -EINVAL; @@ -188,7 +188,6 @@ nouveau_sgdma_init_ttm(struct drm_device *dev) nvbe->dev = dev; nvbe->backend.func = &nouveau_sgdma_backend; - nvbe->backend.mem_type = DRM_BO_MEM_TT; return &nvbe->backend; } @@ -278,6 +277,8 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_ttm_backend *be; struct drm_scatter_gather sgreq; + struct drm_mm_node mm_node; + struct drm_bo_mem_reg mem; int 
ret; dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); @@ -303,7 +304,10 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev) return ret; } - if ((ret = be->func->bind(be, 0, 0))) { + mm_node.start = 0; + mem.mm_node = &mm_node; + + if ((ret = be->func->bind(be, &mem))) { DRM_ERROR("failed bind: %d\n", ret); return ret; } diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index eb5ea826..a6c59832 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -37,7 +37,8 @@ struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev) return drm_agp_init_ttm(dev); } -int via_fence_types(struct drm_buffer_object *bo, uint32_t * type) +int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass, + uint32_t * type) { *type = 3; return 0; diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index a6d4ece9..4576dc90 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -98,7 +98,8 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) drm_idlelock_release(&dev->lock); dev_priv->have_idlelock = 0; } - drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, signaled_flush_types); + drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, + signaled_flush_types, 0); } } diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index adedf300..721cc1a9 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -60,7 +60,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) if (signaled_flush_types) { drm_fence_handler(dev, 0, info->complete_sequence, - signaled_flush_types); + signaled_flush_types, 0); } } -- cgit v1.2.3 From bea727b8387f3094b9921004d7686a2d77184466 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 22 Sep 2007 13:38:36 +0200 Subject: Make nouveau compile on older kernels. 
--- linux-core/drm_compat.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 870f8b73..f74f4bc2 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -193,7 +193,10 @@ extern void drm_clear_vma(struct vm_area_struct *vma, extern pgprot_t vm_get_page_prot(unsigned long vm_flags); #ifndef GFP_DMA32 -#define GFP_DMA32 0 +#define GFP_DMA32 GFP_KERNEL +#endif +#ifndef __GFP_DMA32 +#define __GFP_DMA32 GFP_KERNEL #endif #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) -- cgit v1.2.3 From 0774090d5b7d3eba734086b437021039bc19c365 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 22 Sep 2007 13:59:56 +0200 Subject: Fix drm_bo.c compiling. --- linux-core/drm_bo.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 1913df44..49a57a85 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1515,13 +1515,6 @@ int drm_bo_do_validate(struct drm_buffer_object *bo, if (ret) goto out; - if ((mask & flags & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { - DRM_ERROR - ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " - "processes\n"); - return -EPERM; - } - DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); ret = drm_bo_new_mask(bo, flags, hint); @@ -1706,7 +1699,7 @@ int drm_buffer_object_create(struct drm_device *dev, } bo->fence_class = 0; - ret = driver->fence_type(bo, &bo->fence_type); + ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type); if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); goto out_err; -- cgit v1.2.3 From 54df1b9ff3b79097fedd8ed7bf54aca30a660cbd Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 22 Sep 2007 14:30:55 +0200 Subject: Fix pinned buffer fence class. 
--- linux-core/drm_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index b46d0361..717e5dab 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1995,7 +1995,7 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, /* Validate the buffer into its pinned location, with no * pending fence. */ - ret = drm_buffer_object_validate(bo, 0, 0, 0); + ret = drm_buffer_object_validate(bo, bo->fence_class, 0, 0); if (ret) { mutex_unlock(&bo->mutex); return ret; -- cgit v1.2.3 From bb5516f4f47d16d5d59797fa170abd50d35377a7 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 12 Sep 2007 23:50:38 +1000 Subject: drm/ttm: fixup fence class naming and interfaces This is some code for nouveau that Ben Skeggs worked on, and also fixes the naming (having class in a system header file == C++ keyword == bad plan) --- linux-core/drm_bo.c | 19 ++++++++--------- linux-core/drm_fence.c | 53 ++++++++++++++++++++++++------------------------ linux-core/drm_objects.h | 18 ++++++++-------- linux-core/i915_fence.c | 2 +- linux-core/via_fence.c | 4 ++-- linux-core/xgi_fence.c | 2 +- 6 files changed, 48 insertions(+), 50 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 717e5dab..a2f66dc6 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -538,7 +538,7 @@ EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); int drm_fence_buffer_objects(struct drm_file * file_priv, struct list_head *list, - uint32_t fence_flags, + uint32_t fence_class, uint32_t fence_flags, struct drm_fence_object * fence, struct drm_fence_object ** used_fence) { @@ -560,13 +560,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, list_for_each_entry(entry, list, lru) { BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED)); fence_type |= entry->fence_type; - if (entry->fence_class != 0) { - DRM_ERROR("Fence class %d is not implemented yet.\n", - 
entry->fence_class); - ret = -EINVAL; - goto out; - } - count++; + if (entry->fence_class == fence_class) + count++; } if (!count) { @@ -583,7 +578,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, list_splice_init(list, &f_list); if (fence) { - if ((fence_type & fence->type) != fence_type) { + if ((fence_type & fence->type) != fence_type || + (fence->fence_class != fence_class)) { DRM_ERROR("Given fence doesn't match buffers " "on unfenced list.\n"); ret = -EINVAL; @@ -591,7 +587,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, } } else { mutex_unlock(&dev->struct_mutex); - ret = drm_fence_object_create(dev, 0, fence_type, + ret = drm_fence_object_create(dev, fence_class, fence_type, fence_flags | DRM_FENCE_FLAG_EMIT, &fence); mutex_lock(&dev->struct_mutex); @@ -609,7 +605,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv, mutex_lock(&entry->mutex); mutex_lock(&dev->struct_mutex); list_del_init(l); - if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { + if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED && + entry->fence_class == fence_class) { count++; if (entry->fence) drm_fence_usage_deref_locked(&entry->fence); diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 2f16f7ef..a6787b09 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -34,14 +34,14 @@ * Typically called by the IRQ handler. 
*/ -void drm_fence_handler(struct drm_device * dev, uint32_t class, +void drm_fence_handler(struct drm_device * dev, uint32_t fence_class, uint32_t sequence, uint32_t type) { int wake = 0; uint32_t diff; uint32_t relevant; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; struct drm_fence_driver *driver = dev->driver->fence_driver; struct list_head *head; struct drm_fence_object *fence, *next; @@ -198,7 +198,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence, struct drm_fence_driver *driver = dev->driver->fence_driver; if (poke_flush) - driver->poke_flush(dev, fence->class); + driver->poke_flush(dev, fence->fence_class); read_lock_irqsave(&fm->lock, flags); signaled = (fence->type & mask & fence->signaled) == (fence->type & mask); @@ -229,7 +229,7 @@ int drm_fence_object_flush(struct drm_fence_object * fence, { struct drm_device *dev = fence->dev; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; struct drm_fence_driver *driver = dev->driver->fence_driver; unsigned long flags; @@ -253,7 +253,7 @@ int drm_fence_object_flush(struct drm_fence_object * fence, } } write_unlock_irqrestore(&fm->lock, flags); - driver->poke_flush(dev, fence->class); + driver->poke_flush(dev, fence->fence_class); return 0; } @@ -262,10 +262,10 @@ int drm_fence_object_flush(struct drm_fence_object * fence, * wrapped around and reused. 
*/ -void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence) +void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence) { struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t old_sequence; unsigned long flags; @@ -308,7 +308,7 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence, { struct drm_device *dev = fence->dev; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; int signaled; unsigned long _end = jiffies + 3*DRM_HZ; int ret = 0; @@ -366,7 +366,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence, } else { - if (driver->has_irq(dev, fence->class, + if (driver->has_irq(dev, fence->fence_class, DRM_FENCE_TYPE_EXE)) { ret = drm_fence_lazy_wait(fence, ignore_signals, DRM_FENCE_TYPE_EXE); @@ -374,7 +374,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence, return ret; } - if (driver->has_irq(dev, fence->class, + if (driver->has_irq(dev, fence->fence_class, mask & ~DRM_FENCE_TYPE_EXE)) { ret = drm_fence_lazy_wait(fence, ignore_signals, mask); @@ -409,7 +409,7 @@ int drm_fence_object_emit(struct drm_fence_object * fence, struct drm_device *dev = fence->dev; struct drm_fence_manager *fm = &dev->fm; struct drm_fence_driver *driver = dev->driver->fence_driver; - struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class]; unsigned long flags; uint32_t sequence; uint32_t native_type; @@ -421,7 +421,7 @@ int drm_fence_object_emit(struct drm_fence_object * fence, return ret; write_lock_irqsave(&fm->lock, flags); - fence->class = class; + fence->fence_class = class; fence->type = type; 
fence->flush_mask = 0x00; fence->submitted_flush = 0x00; @@ -456,7 +456,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class, */ INIT_LIST_HEAD(&fence->base.list); - fence->class = class; + fence->fence_class = class; fence->type = type; fence->flush_mask = 0; fence->submitted_flush = 0; @@ -466,7 +466,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class, write_unlock_irqrestore(&fm->lock, flags); if (fence_flags & DRM_FENCE_FLAG_EMIT) { ret = drm_fence_object_emit(fence, fence_flags, - fence->class, type); + fence->fence_class, type); } return ret; } @@ -533,7 +533,7 @@ void drm_fence_manager_init(struct drm_device * dev) BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES); for (i=0; inum_classes; ++i) { - class = &fm->class[i]; + class = &fm->fence_class[i]; INIT_LIST_HEAD(&class->ring); class->pending_flush = 0; @@ -582,7 +582,7 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * if (arg->flags & DRM_FENCE_FLAG_EMIT) LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_object_create(dev, arg->class, + ret = drm_fence_object_create(dev, arg->fence_class, arg->type, arg->flags, &fence); if (ret) return ret; @@ -601,7 +601,7 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * arg->handle = fence->base.hash.key; read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -656,7 +656,7 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil fence = drm_lookup_fence_object(file_priv, arg->handle); read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -700,7 +700,7 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, 
struct drm_file return -EINVAL; read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -729,7 +729,7 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f ret = drm_fence_object_flush(fence, arg->type); read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -761,7 +761,7 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi 0, arg->type); read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -789,11 +789,11 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi fence = drm_lookup_fence_object(file_priv, arg->handle); if (!fence) return -EINVAL; - ret = drm_fence_object_emit(fence, arg->flags, arg->class, + ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class, arg->type); read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); @@ -821,8 +821,8 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file return -EINVAL; } LOCK_TEST_WITH_RETURN(dev, file_priv); - ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags, - NULL, &fence); + ret = drm_fence_buffer_objects(file_priv, NULL, arg->fence_class, + arg->flags, NULL, &fence); if (ret) return ret; ret = drm_fence_add_user_object(file_priv, fence, @@ -834,9 +834,10 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file arg->handle = fence->base.hash.key; 
read_lock_irqsave(&fm->lock, flags); - arg->class = fence->class; + arg->fence_class = fence->fence_class; arg->type = fence->type; arg->signaled = fence->signaled; + arg->sequence = fence->sequence; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 096041d7..b2f1ae17 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -149,7 +149,7 @@ struct drm_fence_object { */ struct list_head ring; - int class; + int fence_class; uint32_t native_type; uint32_t type; uint32_t signaled; @@ -173,7 +173,7 @@ struct drm_fence_class_manager { struct drm_fence_manager { int initialized; rwlock_t lock; - struct drm_fence_class_manager class[_DRM_FENCE_CLASSES]; + struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES]; uint32_t num_classes; atomic_t count; }; @@ -184,18 +184,18 @@ struct drm_fence_driver { uint32_t flush_diff; uint32_t sequence_mask; int lazy_capable; - int (*has_irq) (struct drm_device * dev, uint32_t class, + int (*has_irq) (struct drm_device * dev, uint32_t fence_class, uint32_t flags); - int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags, + int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags, uint32_t * breadcrumb, uint32_t * native_type); - void (*poke_flush) (struct drm_device * dev, uint32_t class); + void (*poke_flush) (struct drm_device * dev, uint32_t fence_class); }; -extern void drm_fence_handler(struct drm_device *dev, uint32_t class, +extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, uint32_t sequence, uint32_t type); extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); -extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, +extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, uint32_t sequence); extern int drm_fence_object_flush(struct 
drm_fence_object * fence, uint32_t type); extern int drm_fence_object_signaled(struct drm_fence_object * fence, @@ -208,7 +208,7 @@ extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, extern int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, - uint32_t fence_flags, uint32_t class, + uint32_t fence_flags, uint32_t fence_class, struct drm_fence_object ** c_fence); extern int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable); @@ -474,7 +474,7 @@ extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * me extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); extern int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, - uint32_t fence_flags, + uint32_t fence_class, uint32_t fence_flags, struct drm_fence_object * fence, struct drm_fence_object ** used_fence); extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 6f0de2ca..89830333 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -42,7 +42,7 @@ static void i915_perform_flush(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_class_manager *fc = &fm->class[0]; + struct drm_fence_class_manager *fc = &fm->fence_class[0]; struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t flush_flags = 0; uint32_t flush_sequence = 0; diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index a6d4ece9..8d60afa6 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -42,7 +42,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - struct 
drm_fence_class_manager *fc = &dev->fm.class[class]; + struct drm_fence_class_manager *fc = &dev->fm.fence_class[class]; uint32_t pending_flush_types = 0; uint32_t signaled_flush_types = 0; uint32_t status; @@ -204,7 +204,7 @@ void via_fence_timer(unsigned long data) drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; struct drm_fence_manager *fm = &dev->fm; uint32_t pending_flush; - struct drm_fence_class_manager *fc = &dev->fm.class[0]; + struct drm_fence_class_manager *fc = &dev->fm.fence_class[0]; if (!dev_priv) return; diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index adedf300..22e1dced 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -33,7 +33,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) { struct xgi_info * info = dev->dev_private; - struct drm_fence_class_manager * fc = &dev->fm.class[class]; + struct drm_fence_class_manager * fc = &dev->fm.fence_class[class]; uint32_t pending_flush_types = 0; uint32_t signaled_flush_types = 0; -- cgit v1.2.3 From 6671ad1917698b6174a1af314b63b3800d75248c Mon Sep 17 00:00:00 2001 From: Alan Hourihane Date: Wed, 26 Sep 2007 15:38:54 +0100 Subject: don't copy back if an error was returned. 
--- linux-core/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index cedb6d50..8513a28f 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -645,7 +645,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retcode = func(dev, kdata, file_priv); } - if (cmd & IOC_OUT) { + if ((retcode == 0) && cmd & IOC_OUT) { if (copy_to_user((void __user *)arg, kdata, _IOC_SIZE(cmd)) != 0) retcode = -EACCES; -- cgit v1.2.3 From b44925b2a553df6a611db320b553336a946aa1a8 Mon Sep 17 00:00:00 2001 From: Alan Hourihane Date: Wed, 26 Sep 2007 16:18:19 +0100 Subject: Add brackets --- linux-core/drm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 8513a28f..73598892 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -645,7 +645,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retcode = func(dev, kdata, file_priv); } - if ((retcode == 0) && cmd & IOC_OUT) { + if ((retcode == 0) && (cmd & IOC_OUT)) { if (copy_to_user((void __user *)arg, kdata, _IOC_SIZE(cmd)) != 0) retcode = -EACCES; -- cgit v1.2.3 From 24cdd2f8c494573e1f84a752ae4eccec8890347a Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Wed, 26 Sep 2007 14:25:10 -0700 Subject: Allow parallel module compile --- linux-core/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/Makefile b/linux-core/Makefile index 1cdf3b30..f2519ed5 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -163,7 +163,7 @@ endif all: modules modules: includes - make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules + +make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules ifeq ($(HEADERFROMBOOT),1) -- cgit v1.2.3 From 215eab6ccfb6d3a22218f996c8215a7dcaf65d01 Mon Sep 17 00:00:00 
2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Thu, 27 Sep 2007 08:01:58 +0200 Subject: Don't build without any optimization on Linux. Building without optimization causes the drm module not to link correctly on ppc. --- linux-core/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/Makefile b/linux-core/Makefile index f2519ed5..6eb5bf5c 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -269,7 +269,7 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \ ifneq ($(PAGE_AGP),0) EXTRA_CFLAGS += -DHAVE_PAGE_AGP endif -EXTRA_CFLAGS += -g -O0 +EXTRA_CFLAGS += -g # Start with all modules turned off. CONFIG_DRM_GAMMA := n -- cgit v1.2.3 From 205403aea8213ffc0e36f4103d78d62bf1584a69 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sun, 30 Sep 2007 21:10:06 +0300 Subject: nouveau: nv30 graph function renames, removed nv20_graph.c All nv30 functions in nv30_graph.c that can be used on nv20 are renamed as accordingly. nv20 specific parts from nv20_graph.c are moved into nv30_graph.c. 
--- linux-core/Makefile.kernel | 2 +- linux-core/nv20_graph.c | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) delete mode 120000 linux-core/nv20_graph.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index b282bd05..6a06d867 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -27,7 +27,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ - nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ + nv04_graph.o nv10_graph.o nv30_graph.o \ nv40_graph.o nv50_graph.o \ nv04_instmem.o nv50_instmem.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv20_graph.c b/linux-core/nv20_graph.c deleted file mode 120000 index 73049914..00000000 --- a/linux-core/nv20_graph.c +++ /dev/null @@ -1 +0,0 @@ -../shared-core/nv20_graph.c \ No newline at end of file -- cgit v1.2.3 From aa135ba8e86d43a738973a25d638b7dc4cdddc55 Mon Sep 17 00:00:00 2001 From: Pekka Paalanen Date: Sun, 30 Sep 2007 22:04:53 +0300 Subject: nouveau: rename nv30_graph.c to nv20_graph.c --- linux-core/Makefile.kernel | 2 +- linux-core/nv20_graph.c | 1 + linux-core/nv30_graph.c | 1 - 3 files changed, 2 insertions(+), 2 deletions(-) create mode 120000 linux-core/nv20_graph.c delete mode 120000 linux-core/nv30_graph.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 6a06d867..0eb10783 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -27,7 +27,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ - nv04_graph.o nv10_graph.o nv30_graph.o \ + nv04_graph.o nv10_graph.o nv20_graph.o \ nv40_graph.o nv50_graph.o \ 
nv04_instmem.o nv50_instmem.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o diff --git a/linux-core/nv20_graph.c b/linux-core/nv20_graph.c new file mode 120000 index 00000000..73049914 --- /dev/null +++ b/linux-core/nv20_graph.c @@ -0,0 +1 @@ +../shared-core/nv20_graph.c \ No newline at end of file diff --git a/linux-core/nv30_graph.c b/linux-core/nv30_graph.c deleted file mode 120000 index 25568ecb..00000000 --- a/linux-core/nv30_graph.c +++ /dev/null @@ -1 +0,0 @@ -../shared-core/nv30_graph.c \ No newline at end of file -- cgit v1.2.3 From b0473699ed7bef4efd0742e0a350d345a7cc9a0c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 2 Oct 2007 15:48:28 +1000 Subject: ttm: returning into dummy causes a buffer object leak as nobody ever derefs dummy, however not returning does the deref correctly. --- linux-core/drm_bo.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index bcbcc662..4e735770 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1767,7 +1767,6 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr struct drm_bo_op_arg *arg = data; struct drm_bo_op_req *req = &arg->d.req; struct drm_bo_info_rep rep; - struct drm_buffer_object *dummy; unsigned long next = 0; void __user *curuserarg = NULL; int ret; @@ -1804,7 +1803,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr req->bo_req.flags, req->bo_req.mask, req->bo_req.hint, - &rep, &dummy); + &rep, NULL); break; case drm_bo_fence: ret = -EINVAL; -- cgit v1.2.3 From 7f99fd5d7aa1f0d2463907d9d8c483b6249ac831 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 3 Oct 2007 14:08:18 -0700 Subject: First round of byte-ordering fixes for PowerPC. This isn't 100% as command submission via PCI-e GART buffers doesn't work. I've hacked around that for the time being. 
This is essentially the code that was used at the POWER.org event to show Bimini. --- linux-core/xgi_cmdlist.c | 46 +++++++++++++++++++++++++++++++++++----------- linux-core/xgi_drv.c | 6 +++--- linux-core/xgi_drv.h | 4 ++-- linux-core/xgi_fence.c | 4 ++-- linux-core/xgi_misc.c | 24 ++++++++++++------------ linux-core/xgi_pcie.c | 3 ++- 6 files changed, 56 insertions(+), 31 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 261f4e13..35f7e1bd 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -45,7 +45,7 @@ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", map->handle, addr, data); #endif - DRM_WRITE32(map, addr, data); + DRM_WRITE32(map, addr, cpu_to_le32(data)); } @@ -98,6 +98,25 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, const struct xgi_cmd_info *const pCmdInfo = (struct xgi_cmd_info *) data; const unsigned int cmd = get_batch_command(pCmdInfo->type); +#if __BIG_ENDIAN + const u32 *const ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); + unsigned i; + unsigned j; + + xgi_waitfor_pci_idle(info); + for (j = 4; j < pCmdInfo->size; j += 4) { + u32 reg = ptr[j]; + + for (i = 1; i < 4; i++) { + if ((reg & 1) != 0) { + const unsigned r = 0x2100 | (reg & 0x0fe); + DRM_WRITE32(info->mmio_map, r, ptr[j + i]); + } + + reg >>= 8; + } + } +#else u32 begin[4]; @@ -138,16 +157,17 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, xgi_emit_flush(info, FALSE); } - info->cmdring.last_ptr[1] = begin[1]; - info->cmdring.last_ptr[2] = begin[2]; - info->cmdring.last_ptr[3] = begin[3]; + info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]); + info->cmdring.last_ptr[2] = cpu_to_le32(begin[2]); + info->cmdring.last_ptr[3] = cpu_to_le32(begin[3]); DRM_WRITEMEMORYBARRIER(); - info->cmdring.last_ptr[0] = begin[0]; + info->cmdring.last_ptr[0] = cpu_to_le32(begin[0]); triggerHWCommandList(info); 
} info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); +#endif drm_fence_flush_old(info->dev, 0, info->next_sequence); return 0; } @@ -258,6 +278,8 @@ void xgi_emit_flush(struct xgi_info * info, bool stop) const unsigned int flush_size = sizeof(flush_command); u32 *batch_addr; u32 hw_addr; + unsigned int i; + /* check buf is large enough to contain a new flush batch */ if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) { @@ -269,18 +291,20 @@ void xgi_emit_flush(struct xgi_info * info, bool stop) batch_addr = info->cmdring.ptr + (info->cmdring.ring_offset / 4); - (void) memcpy(batch_addr, flush_command, flush_size); + for (i = 0; i < (flush_size / 4); i++) { + batch_addr[i] = cpu_to_le32(flush_command[i]); + } if (stop) { - *batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK; + *batch_addr |= cpu_to_le32(BEGIN_STOP_STORE_CURRENT_POINTER_MASK); } - info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4); - info->cmdring.last_ptr[2] = hw_addr >> 4; + info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (flush_size / 4)); + info->cmdring.last_ptr[2] = cpu_to_le32(hw_addr >> 4); info->cmdring.last_ptr[3] = 0; DRM_WRITEMEMORYBARRIER(); - info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) - | (BEGIN_VALID_MASK); + info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK)); triggerHWCommandList(info); diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index bc6873a9..4e66197e 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -351,9 +351,9 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; struct xgi_info *info = dev->dev_private; - const u32 irq_bits = DRM_READ32(info->mmio_map, + const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map, (0x2800 - + M2REG_AUTO_LINK_STATUS_ADDRESS)) + + M2REG_AUTO_LINK_STATUS_ADDRESS))) & (M2REG_ACTIVE_TIMER_INTERRUPT_MASK | M2REG_ACTIVE_INTERRUPT_0_MASK | 
M2REG_ACTIVE_INTERRUPT_2_MASK @@ -363,7 +363,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) if (irq_bits != 0) { DRM_WRITE32(info->mmio_map, 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS, - M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits); + cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits)); xgi_fence_handler(dev); return IRQ_HANDLED; } else { diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index a68dc03b..d9a94f5f 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -35,11 +35,11 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20070918" +#define DRIVER_DATE "20071003" #define DRIVER_MAJOR 1 #define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 0 +#define DRIVER_PATCHLEVEL 3 #include "xgi_cmdlist.h" #include "xgi_drm.h" diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index 22e1dced..a98a8422 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -48,8 +48,8 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) if (pending_flush_types) { if (pending_flush_types & DRM_FENCE_TYPE_EXE) { - const u32 begin_id = DRM_READ32(info->mmio_map, - 0x2820) + const u32 begin_id = le32_to_cpu(DRM_READ32(info->mmio_map, + 0x2820)) & BEGIN_BEGIN_IDENTIFICATION_MASK; if (begin_id != info->complete_sequence) { diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 50a721c0..f39b3bb5 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -38,12 +38,12 @@ static unsigned int s_invalid_begin = 0; static bool xgi_validate_signal(struct drm_map * map) { - if (DRM_READ32(map, 0x2800) & 0x001c0000) { + if (le32_to_cpu(DRM_READ32(map, 0x2800) & 0x001c0000)) { u16 check; /* Check Read back status */ DRM_WRITE8(map, 0x235c, 0x80); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { return FALSE; @@ -51,28 +51,28 @@ static bool xgi_validate_signal(struct drm_map * map) /* Check RO channel */ 
DRM_WRITE8(map, 0x235c, 0x83); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { return FALSE; } /* Check RW channel */ DRM_WRITE8(map, 0x235c, 0x88); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if ((check & 0x0f) != ((check & 0xf0) >> 4)) { return FALSE; } /* Check RO channel outstanding */ DRM_WRITE8(map, 0x235c, 0x8f); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if (0 != (check & 0x3ff)) { return FALSE; } /* Check RW channel outstanding */ DRM_WRITE8(map, 0x235c, 0x90); - check = DRM_READ16(map, 0x2360); + check = le16_to_cpu(DRM_READ16(map, 0x2360)); if (0 != (check & 0x3ff)) { return FALSE; } @@ -89,7 +89,7 @@ static void xgi_ge_hang_reset(struct drm_map * map) int time_out = 0xffff; DRM_WRITE8(map, 0xb057, 8); - while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) { + while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) { while (0 != ((--time_out) & 0xfff)) /* empty */ ; @@ -100,7 +100,7 @@ static void xgi_ge_hang_reset(struct drm_map * map) u8 old_36; DRM_INFO("Can not reset back 0x%x!\n", - DRM_READ32(map, 0x2800)); + le32_to_cpu(DRM_READ32(map, 0x2800))); DRM_WRITE8(map, 0xb057, 0); @@ -137,7 +137,7 @@ static void xgi_ge_hang_reset(struct drm_map * map) bool xgi_ge_irq_handler(struct xgi_info * info) { - const u32 int_status = DRM_READ32(info->mmio_map, 0x2810); + const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810)); bool is_support_auto_reset = FALSE; /* Check GE on/off */ @@ -146,7 +146,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) /* We got GE stall interrupt. 
*/ DRM_WRITE32(info->mmio_map, 0x2810, - int_status | 0x04000000); + cpu_to_le32(int_status | 0x04000000)); if (is_support_auto_reset) { static cycles_t last_tick; @@ -176,7 +176,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info) } else if (0 != (0x1 & int_status)) { s_invalid_begin++; DRM_WRITE32(info->mmio_map, 0x2810, - (int_status & ~0x01) | 0x04000000); + cpu_to_le32((int_status & ~0x01) | 0x04000000)); } return TRUE; @@ -326,7 +326,7 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) unsigned int same_count = 0; while (idleCount < 5) { - const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) + const u32 status = le32_to_cpu(DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)) & IDLE_MASK; if (status == old_status) { diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index a7d3ea24..4becf35b 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -40,7 +40,8 @@ void xgi_gart_flush(struct drm_device *dev) DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); /* Set GART base address to HW */ - DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr); + DRM_WRITE32(info->mmio_map, 0xB034, + cpu_to_le32(info->gart_info.bus_addr)); /* Flush GART table. 
*/ DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); -- cgit v1.2.3 From 0379919e99542bc50cf9d0a8a3996b2896ec4e64 Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 3 Oct 2007 14:12:16 -0700 Subject: Use 'ifdef __BIG_ENDIAN' instead of 'if __BIG_ENDIAN' --- linux-core/xgi_cmdlist.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 35f7e1bd..c25b0e0d 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -98,7 +98,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, const struct xgi_cmd_info *const pCmdInfo = (struct xgi_cmd_info *) data; const unsigned int cmd = get_batch_command(pCmdInfo->type); -#if __BIG_ENDIAN +#ifdef __BIG_ENDIAN const u32 *const ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); unsigned i; unsigned j; -- cgit v1.2.3 From 495bbbaadc93c574eb98dd2ad64bdca4d91d4152 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 4 Oct 2007 16:13:22 +1000 Subject: drm: fix page count calculation Also no need to do pre-populate work on single page --- linux-core/drm_bo_move.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index dae99181..2a35d45b 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -488,16 +488,6 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag BUG_ON(!ttm); - /* - * Populate the part we're mapping; - */ - - for (i=start_page; i< num_pages; ++i) { - d = drm_ttm_get_page(ttm, i); - if (!d) - return -ENOMEM; - } - if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) { /* @@ -509,6 +499,15 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag map->page = drm_ttm_get_page(ttm, start_page); map->virtual = kmap(map->page); } else { + /* + * Populate the part we're mapping; + */ + + for (i = start_page; i< start_page + 
num_pages; ++i) { + d = drm_ttm_get_page(ttm, i); + if (!d) + return -ENOMEM; + } /* * We need to use vmap to get the desired page protection -- cgit v1.2.3 From 5ca12104f8a3eebecae6d238c1c456c8e6540ae3 Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Tue, 2 Oct 2007 21:54:37 +0200 Subject: linux-drm: Obey device class requirements when detecting devices. --- linux-core/drm_drv.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'linux-core') diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 73598892..a09fa96e 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -321,6 +321,11 @@ int drm_init(struct drm_driver *driver, while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) { + /* Are there device class requirements? */ + if ((pid->class != 0) + && ((pdev->class & pid->class_mask) != pid->class)) { + continue; + } /* is there already a driver loaded, or (short circuit saves work) */ /* does something like VesaFB have control of the memory region? */ if (pci_dev_driver(pdev) @@ -347,6 +352,11 @@ int drm_init(struct drm_driver *driver, pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) { + /* Are there device class requirements? */ + if ((pid->class != 0) + && ((pdev->class & pid->class_mask) != pid->class)) { + continue; + } /* stealth mode requires a manual probe */ pci_dev_get(pdev); if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) { -- cgit v1.2.3 From b510517d59efcb45cc7079743be967bee122b251 Mon Sep 17 00:00:00 2001 From: Maarten Maathuis Date: Thu, 4 Oct 2007 09:31:46 +0200 Subject: nouveau: Switch over to using PMC_BOOT_0 for card detection. 
--- linux-core/nouveau_drv.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c index 6c73b0d3..01de67de 100644 --- a/linux-core/nouveau_drv.c +++ b/linux-core/nouveau_drv.c @@ -29,7 +29,16 @@ #include "drm_pciids.h" static struct pci_device_id pciidlist[] = { - nouveau_PCI_IDS + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), + .class = PCI_BASE_CLASS_DISPLAY << 16, + .class_mask = 0xff << 16, + }, + { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID), + .class = PCI_BASE_CLASS_DISPLAY << 16, + .class_mask = 0xff << 16, + } }; extern struct drm_ioctl_desc nouveau_ioctls[]; -- cgit v1.2.3 From d4680333dc850832258d0f38fb2a236a3f568fc8 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 10 Oct 2007 09:31:51 +0200 Subject: Only add native-type on EXE signals. Otherwise flush flags may get out of sync. --- linux-core/drm_fence.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index c25ff3b8..9a29356b 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -90,7 +90,9 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class, break; } - type |= fence->native_type; + if (is_exe) + type |= fence->native_type; + relevant = type & fence->type; if ((fence->signaled | relevant) != fence->signaled) { -- cgit v1.2.3 From 83da774b192966b8c3f00b531ecfd4ec2b5eceaa Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 10 Oct 2007 15:25:30 -0700 Subject: Fix command list submission on big-endian. 
--- linux-core/xgi_cmdlist.c | 30 +++++------------------------- linux-core/xgi_misc.c | 2 +- linux-core/xgi_pcie.c | 3 +-- 3 files changed, 7 insertions(+), 28 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index c25b0e0d..69bf6465 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -45,7 +45,7 @@ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", map->handle, addr, data); #endif - DRM_WRITE32(map, addr, cpu_to_le32(data)); + DRM_WRITE32(map, addr, data); } @@ -98,25 +98,6 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, const struct xgi_cmd_info *const pCmdInfo = (struct xgi_cmd_info *) data; const unsigned int cmd = get_batch_command(pCmdInfo->type); -#ifdef __BIG_ENDIAN - const u32 *const ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); - unsigned i; - unsigned j; - - xgi_waitfor_pci_idle(info); - for (j = 4; j < pCmdInfo->size; j += 4) { - u32 reg = ptr[j]; - - for (i = 1; i < 4; i++) { - if ((reg & 1) != 0) { - const unsigned r = 0x2100 | (reg & 0x0fe); - DRM_WRITE32(info->mmio_map, r, ptr[j + i]); - } - - reg >>= 8; - } - } -#else u32 begin[4]; @@ -167,7 +148,6 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); -#endif drm_fence_flush_old(info->dev, 0, info->next_sequence); return 0; } @@ -323,13 +303,13 @@ void xgi_emit_flush(struct xgi_info * info, bool stop) */ void xgi_emit_nop(struct xgi_info * info) { - info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK - | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence); + info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)); info->cmdring.last_ptr[2] = 0; info->cmdring.last_ptr[3] = 0; DRM_WRITEMEMORYBARRIER(); - info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) - 
| (BEGIN_VALID_MASK); + info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK)); triggerHWCommandList(info); diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index f39b3bb5..4a4a9844 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -326,7 +326,7 @@ void xgi_waitfor_pci_idle(struct xgi_info * info) unsigned int same_count = 0; while (idleCount < 5) { - const u32 status = le32_to_cpu(DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)) + const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) & IDLE_MASK; if (status == old_status) { diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 4becf35b..a7d3ea24 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -40,8 +40,7 @@ void xgi_gart_flush(struct drm_device *dev) DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); /* Set GART base address to HW */ - DRM_WRITE32(info->mmio_map, 0xB034, - cpu_to_le32(info->gart_info.bus_addr)); + DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr); /* Flush GART table. */ DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); -- cgit v1.2.3 From fc7d4d19d36b6a12ed23d4d9e50826346258299f Mon Sep 17 00:00:00 2001 From: Ian Romanick Date: Wed, 10 Oct 2007 15:27:07 -0700 Subject: Eliminate trailing whitespace from last commit. 
--- linux-core/xgi_cmdlist.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 69bf6465..d7b23c89 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -303,12 +303,12 @@ void xgi_emit_flush(struct xgi_info * info, bool stop) */ void xgi_emit_nop(struct xgi_info * info) { - info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK + info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)); info->cmdring.last_ptr[2] = 0; info->cmdring.last_ptr[3] = 0; DRM_WRITEMEMORYBARRIER(); - info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24) + info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24) | (BEGIN_VALID_MASK)); triggerHWCommandList(info); -- cgit v1.2.3 From 604f02ff619d87d1372bcb7969c826d981fefc60 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 12 Oct 2007 09:46:11 +1000 Subject: i915: check mask instead of flags for buffer fence types --- linux-core/i915_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 75763e71..f3ba7ce5 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -42,7 +42,7 @@ int i915_fence_types(struct drm_buffer_object *bo, uint32_t * fclass, uint32_t * type) { - if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) + if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; else *type = 1; -- cgit v1.2.3 From 9d779e2c88a02f5f9d57618145654610f0f10e28 Mon Sep 17 00:00:00 2001 From: Arthur Huillet Date: Fri, 12 Oct 2007 22:39:58 +0200 Subject: nouveau: mandatory "oops I forgot half of the files" commit --- linux-core/Makefile.kernel | 2 +- linux-core/nouveau_swmthd.c | 1 + linux-core/nouveau_swmthd.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) create mode 
120000 linux-core/nouveau_swmthd.c create mode 120000 linux-core/nouveau_swmthd.h (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 0eb10783..715454bc 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -21,7 +21,7 @@ i810-objs := i810_drv.o i810_dma.o i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ i915_buffer.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ - nouveau_object.o nouveau_irq.o nouveau_notifier.o \ + nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ nouveau_sgdma.o nouveau_dma.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ diff --git a/linux-core/nouveau_swmthd.c b/linux-core/nouveau_swmthd.c new file mode 120000 index 00000000..c5390801 --- /dev/null +++ b/linux-core/nouveau_swmthd.c @@ -0,0 +1 @@ +../shared-core/nouveau_swmthd.c \ No newline at end of file diff --git a/linux-core/nouveau_swmthd.h b/linux-core/nouveau_swmthd.h new file mode 120000 index 00000000..33425dcd --- /dev/null +++ b/linux-core/nouveau_swmthd.h @@ -0,0 +1 @@ +../shared-core/nouveau_swmthd.h \ No newline at end of file -- cgit v1.2.3 From 440fc5113ef1ffb1a22bff92cf34eaf23896db8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=5Butf-8=5D=20Kristian=20H=C3=B8gsberg?= Date: Tue, 9 Oct 2007 21:09:29 -0400 Subject: Eliminate support for fake buffers. 
--- linux-core/drm_bo.c | 68 ++--------------------------------------------------- 1 file changed, 2 insertions(+), 66 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 4e735770..7dd9856d 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -148,7 +148,6 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo) ret = -ENOMEM; break; case drm_bo_type_user: - case drm_bo_type_fake: break; default: DRM_ERROR("Illegal buffer object type\n"); @@ -695,12 +694,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, evict_mem = bo->mem; evict_mem.mm_node = NULL; - if (bo->type == drm_bo_type_fake) { - bo->mem.mem_type = DRM_BO_MEM_LOCAL; - bo->mem.mm_node = NULL; - goto out1; - } - evict_mem = bo->mem; evict_mem.mask = dev->driver->bo_driver->evict_mask(bo); ret = drm_bo_mem_space(bo, &evict_mem, no_wait); @@ -720,7 +713,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, goto out; } - out1: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { if (evict_mem.mm_node != bo->pinned_node) @@ -1355,44 +1347,6 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) return 1; } -static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem) -{ - struct drm_buffer_manager *bm = &dev->bm; - struct drm_mem_type_manager *man; - uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; - const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; - uint32_t i; - int type_ok = 0; - uint32_t mem_type = 0; - uint32_t cur_flags; - - if (drm_bo_mem_compat(mem)) - return 0; - - BUG_ON(mem->mm_node); - - for (i = 0; i < num_prios; ++i) { - mem_type = prios[i]; - man = &bm->man[mem_type]; - type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, - &cur_flags); - if (type_ok) - break; - } - - if (type_ok) { - mem->mm_node = NULL; - mem->mem_type = mem_type; - mem->flags = cur_flags; - DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE); 
- return 0; - } - - DRM_ERROR("Illegal fake buffer flags 0x%016llx\n", - (unsigned long long) mem->mask); - return -EINVAL; -} - /* * bo locked. */ @@ -1449,11 +1403,6 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, DRM_ERROR("Timed out waiting for buffer unmap.\n"); return ret; } - if (bo->type == drm_bo_type_fake) { - ret = drm_bo_check_fake(dev, &bo->mem); - if (ret) - return ret; - } /* * Check whether we need to move buffer. @@ -1642,7 +1591,7 @@ int drm_buffer_object_create(struct drm_device *dev, int ret = 0; unsigned long num_pages; - if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) { + if (buffer_start & ~PAGE_MASK) { DRM_ERROR("Invalid buffer object start.\n"); return -EINVAL; } @@ -1677,12 +1626,7 @@ int drm_buffer_object_create(struct drm_device *dev, bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; - if (bo->type == drm_bo_type_fake) { - bo->offset = buffer_start; - bo->buffer_start = 0; - } else { - bo->buffer_start = buffer_start; - } + bo->buffer_start = buffer_start; bo->priv_flags = 0; bo->mem.flags = 0ULL; bo->mem.mask = 0ULL; @@ -1707,12 +1651,6 @@ int drm_buffer_object_create(struct drm_device *dev, goto out_err; } - if (bo->type == drm_bo_type_fake) { - ret = drm_bo_check_fake(dev, &bo->mem); - if (ret) - goto out_err; - } - ret = drm_bo_add_ttm(bo); if (ret) goto out_err; @@ -1852,8 +1790,6 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } - if (req->type == drm_bo_type_fake) - LOCK_TEST_WITH_RETURN(dev, file_priv); ret = drm_buffer_object_create(file_priv->head->dev, req->size, req->type, req->mask, -- cgit v1.2.3 From dccefba71a65566e7e1628b3be67621866000411 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 9 Oct 2007 21:09:30 -0400 Subject: Take bo type argument out of the ioctl interface. 
The buffer object type is still tracked internally, but it is no longer part of the user space visible ioctl interface. If the bo create ioctl specifies a non-NULL buffer address we assume drm_bo_type_user, otherwise drm_bo_type_dc. Kernel side allocations call drm_buffer_object_create() directly and can still specify drm_bo_type_kernel. Not 100% this makes sense either, but with this patch, the buffer type is no longer exported and we can clean up the internals later on. --- linux-core/drm_bo.c | 11 +++++++---- linux-core/drm_objects.h | 6 ++++++ 2 files changed, 13 insertions(+), 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 7dd9856d..e2f460ed 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1620,7 +1620,10 @@ int drm_buffer_object_create(struct drm_device *dev, INIT_LIST_HEAD(&bo->vma_list); #endif bo->dev = dev; - bo->type = type; + if (buffer_start != 0) + bo->type = drm_bo_type_user; + else + bo->type = type; bo->num_pages = num_pages; bo->mem.mem_type = DRM_BO_MEM_LOCAL; bo->mem.num_pages = bo->num_pages; @@ -1783,8 +1786,8 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_buffer_object *entry; int ret = 0; - DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n", - (int)(req->size / 1024), req->page_alignment * 4, req->type); + DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n", + (int)(req->size / 1024), req->page_alignment * 4); if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); @@ -1792,7 +1795,7 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil } ret = drm_buffer_object_create(file_priv->head->dev, - req->size, req->type, req->mask, + req->size, drm_bo_type_dc, req->mask, req->hint, req->page_alignment, req->buffer_start, &entry); if (ret) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 9748baae..b58db57f 100644 --- a/linux-core/drm_objects.h 
+++ b/linux-core/drm_objects.h @@ -350,6 +350,12 @@ struct drm_bo_mem_reg { uint32_t hw_tile_stride; }; +enum drm_bo_type { + drm_bo_type_dc, + drm_bo_type_user, + drm_bo_type_kernel, /* for initial kernel allocations */ +}; + struct drm_buffer_object { struct drm_device *dev; struct drm_user_object base; -- cgit v1.2.3 From a69c85fec8ed323bffb1324ea08157b3897e97db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Tue, 9 Oct 2007 21:09:31 -0400 Subject: Drop destroy ioctls for fences and buffer objects. We now always create a drm_ref_object for user objects and this is then the only things that holds a reference to the user object. This way unreference on will destroy the user object when the last drm_ref_object goes way. --- linux-core/drmP.h | 1 - linux-core/drm_bo.c | 31 ++----------------------------- linux-core/drm_drv.c | 2 -- linux-core/drm_fence.c | 28 +--------------------------- linux-core/drm_fops.c | 13 ------------- linux-core/drm_object.c | 27 +++++++-------------------- linux-core/drm_objects.h | 12 ------------ 7 files changed, 10 insertions(+), 104 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index f8ca3f4b..d0ab2c94 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -428,7 +428,6 @@ struct drm_file { */ struct list_head refd_objects; - struct list_head user_objects; struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; struct file *filp; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index e2f460ed..fb360e7f 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1674,8 +1674,8 @@ int drm_buffer_object_create(struct drm_device *dev, } EXPORT_SYMBOL(drm_buffer_object_create); -int drm_bo_add_user_object(struct drm_file *file_priv, - struct drm_buffer_object *bo, int shareable) +static int drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int shareable) { struct drm_device *dev = file_priv->head->dev; int ret; @@ -1694,7 
+1694,6 @@ int drm_bo_add_user_object(struct drm_file *file_priv, mutex_unlock(&dev->struct_mutex); return ret; } -EXPORT_SYMBOL(drm_bo_add_user_object); static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) { @@ -1816,32 +1815,6 @@ out: return ret; } - -int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_bo_handle_arg *arg = data; - struct drm_user_object *uo; - int ret = 0; - - DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle); - - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized.\n"); - return -EINVAL; - } - - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(file_priv, arg->handle); - if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - ret = drm_remove_user_object(file_priv, uo); - mutex_unlock(&dev->struct_mutex); - - return ret; -} - int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_bo_map_wait_idle_arg *arg = data; diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index a09fa96e..0fca3a27 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -129,7 +129,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_FENCE_DESTROY, drm_fence_destroy_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH), @@ -139,7 +138,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH), - 
DRM_IOCTL_DEF(DRM_IOCTL_BO_DESTROY, drm_bo_destroy_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 9a29356b..d1969f86 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -517,7 +517,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class, return ret; } -int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, +static int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable) { struct drm_device *dev = priv->head->dev; @@ -535,7 +535,6 @@ out: mutex_unlock(&dev->struct_mutex); return ret; } -EXPORT_SYMBOL(drm_fence_add_user_object); int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type, unsigned flags, struct drm_fence_object ** c_fence) @@ -670,31 +669,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file * return ret; } -int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - int ret; - struct drm_fence_manager *fm = &dev->fm; - struct drm_fence_arg *arg = data; - struct drm_user_object *uo; - ret = 0; - - if (!fm->initialized) { - DRM_ERROR("The DRM driver does not support fencing.\n"); - return -EINVAL; - } - - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(file_priv, arg->handle); - if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - ret = drm_remove_user_object(file_priv, uo); - mutex_unlock(&dev->struct_mutex); - return ret; -} - - int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { int ret; diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index ab5f4ca5..0ccaed5b 
100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -263,7 +263,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp, priv->lock_count = 0; INIT_LIST_HEAD(&priv->lhead); - INIT_LIST_HEAD(&priv->user_objects); INIT_LIST_HEAD(&priv->refd_objects); for (i=0; i<_DRM_NO_REF_TYPES; ++i) { @@ -338,7 +337,6 @@ static void drm_object_release(struct file *filp) { struct drm_file *priv = filp->private_data; struct list_head *head; - struct drm_user_object *user_object; struct drm_ref_object *ref_object; int i; @@ -357,17 +355,6 @@ static void drm_object_release(struct file *filp) { head = &priv->refd_objects; } - /* - * Free leftover user objects created by me. - */ - - head = &priv->user_objects; - while (head->next != head) { - user_object = list_entry(head->next, struct drm_user_object, list); - drm_remove_user_object(priv, user_object); - head = &priv->user_objects; - } - for(i=0; i<_DRM_NO_REF_TYPES; ++i) { drm_ht_remove(&priv->refd_object_hash[i]); } diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 6bd89b1d..a6d6c0d7 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -38,7 +38,8 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, DRM_ASSERT_LOCKED(&dev->struct_mutex); - atomic_set(&item->refcount, 1); + /* The refcount will be bumped to 1 when we add the ref object below. 
*/ + atomic_set(&item->refcount, 0); item->shareable = shareable; item->owner = priv; @@ -47,8 +48,11 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, if (ret) return ret; - list_add_tail(&item->list, &priv->user_objects); - return 0; + ret = drm_add_ref_object(priv, item, _DRM_REF_USE); + if (ret) + ret = drm_ht_remove_item(&dev->object_hash, &item->hash); + + return ret; } EXPORT_SYMBOL(drm_add_user_object); @@ -87,27 +91,10 @@ static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object if (atomic_dec_and_test(&item->refcount)) { ret = drm_ht_remove_item(&dev->object_hash, &item->hash); BUG_ON(ret); - list_del_init(&item->list); item->remove(priv, item); } } -int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item) -{ - DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); - - if (item->owner != priv) { - DRM_ERROR("Cannot destroy object not owned by you.\n"); - return -EINVAL; - } - item->owner = 0; - item->shareable = 0; - list_del_init(&item->list); - drm_deref_user_object(priv, item); - return 0; -} -EXPORT_SYMBOL(drm_remove_user_object); - static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, enum drm_ref_type action) { diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index b58db57f..67c33745 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -102,15 +102,6 @@ extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key); -/* - * Must be called with the struct_mutex held. - * If "item" has been obtained by a call to drm_lookup_user_object. You may not - * release the struct_mutex before calling drm_remove_ref_object. - * This function may temporarily release the struct_mutex. 
- */ - -extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item); - /* * Must be called with the struct_mutex held. May temporarily release it. */ @@ -222,9 +213,6 @@ extern int drm_fence_object_emit(struct drm_fence_object * fence, extern void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg); -extern int drm_fence_add_user_object(struct drm_file * priv, - struct drm_fence_object * fence, int shareable); - extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, -- cgit v1.2.3 From db1709f2f3f8cab2477fb149b58420de4db65654 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 16 Oct 2007 15:10:08 +0200 Subject: Revert part of earlier commit that caused an unresolved symbol for i915. --- linux-core/drm_fence.c | 3 ++- linux-core/drm_objects.h | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index d1969f86..e696b42d 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -517,7 +517,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class, return ret; } -static int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, +int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable) { struct drm_device *dev = priv->head->dev; @@ -535,6 +535,7 @@ out: mutex_unlock(&dev->struct_mutex); return ret; } +EXPORT_SYMBOL(drm_fence_add_user_object); int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type, unsigned flags, struct drm_fence_object ** c_fence) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 67c33745..726ccbe2 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -213,6 +213,9 @@ extern int 
drm_fence_object_emit(struct drm_fence_object * fence, extern void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg); +extern int drm_fence_add_user_object(struct drm_file * priv, + struct drm_fence_object * fence, int shareable); + extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, -- cgit v1.2.3 From 12b989a7108a52f16b1b1bb6dd2ea818c235b52c Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 4 Oct 2007 09:51:01 +0200 Subject: Revert "Remove the pinned buffer from the LRU when pinning." This reverts 3a0bc518e35c62bb9c64c9105f836584d949653f commit. --- linux-core/Makefile | 1 - linux-core/drm_bo.c | 12 ++++-------- 2 files changed, 4 insertions(+), 9 deletions(-) (limited to 'linux-core') diff --git a/linux-core/Makefile b/linux-core/Makefile index 6eb5bf5c..7f6b123e 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -269,7 +269,6 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \ ifneq ($(PAGE_AGP),0) EXTRA_CFLAGS += -DHAVE_PAGE_AGP endif -EXTRA_CFLAGS += -g # Start with all modules turned off. CONFIG_DRM_GAMMA := n diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index fb360e7f..099ebe07 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1965,8 +1965,8 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, return ret; } - /* Validate the buffer into its pinned location, with no - * pending fence. + /* Validate the buffer into its pinned location, with no pending + * fence. 
*/ ret = drm_buffer_object_validate(bo, bo->fence_class, 0, 0); if (ret) { @@ -1974,12 +1974,9 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, return ret; } - /* Pull the buffer off of the LRU and add it to the pinned - * list - */ + /* Add our buffer to the pinned list */ bo->pinned_mem_type = bo->mem.mem_type; mutex_lock(&dev->struct_mutex); - list_del_init(&bo->lru); list_del_init(&bo->pinned_lru); drm_bo_add_to_pinned_lru(bo); @@ -1989,7 +1986,6 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, bo->pinned_node = bo->mem.mm_node; } - bo->pinned = pin; mutex_unlock(&dev->struct_mutex); } else { @@ -2001,9 +1997,9 @@ drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, list_del_init(&bo->pinned_lru); bo->pinned_node = NULL; - bo->pinned = pin; mutex_unlock(&dev->struct_mutex); } + bo->pinned = pin; mutex_unlock(&bo->mutex); return 0; } -- cgit v1.2.3 From cd276d9cab0be8eff2d9450e5c95b6eb3cd639af Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 4 Oct 2007 10:01:30 +0200 Subject: Revert "Copy the important parts of object_validate into object_create()." This reverts f9c27aa50b715a7d21858f1ce9e4785120bd0c36 commit. 
--- linux-core/drm_bo.c | 44 +++++++++----------------------------------- 1 file changed, 9 insertions(+), 35 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 099ebe07..6e1de80b 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1587,7 +1587,6 @@ int drm_buffer_object_create(struct drm_device *dev, { struct drm_buffer_manager *bm = &dev->bm; struct drm_buffer_object *bo; - struct drm_bo_driver *driver = dev->driver->bo_driver; int ret = 0; unsigned long num_pages; @@ -1646,7 +1645,7 @@ int drm_buffer_object_create(struct drm_device *dev, if (ret) goto out_err; } - +#if 0 bo->fence_class = 0; ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type); if (ret) { @@ -1655,13 +1654,12 @@ int drm_buffer_object_create(struct drm_device *dev, } ret = drm_bo_add_ttm(bo); +#else + ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); +#endif if (ret) goto out_err; - mutex_lock(&dev->struct_mutex); - drm_bo_add_to_lru(bo); - mutex_unlock(&dev->struct_mutex); - mutex_unlock(&bo->mutex); *buf_obj = bo; return 0; @@ -1711,8 +1709,6 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr void __user *curuserarg = NULL; int ret; - DRM_DEBUG("drm_bo_op_ioctl\n"); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1792,6 +1788,11 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } +#if 0 + ret = drm_bo_lock_test(dev, file_priv); + if (ret) + goto out; +#endif ret = drm_buffer_object_create(file_priv->head->dev, req->size, drm_bo_type_dc, req->mask, @@ -1821,9 +1822,6 @@ int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_p struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; - - DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle); - 
if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1841,9 +1839,6 @@ int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file { struct drm_bo_handle_arg *arg = data; int ret; - - DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1862,8 +1857,6 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_user_object *uo; int ret; - DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1886,8 +1879,6 @@ int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_bo_handle_arg *arg = data; int ret = 0; - DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1904,8 +1895,6 @@ int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_info_rep *rep = &arg->d.rep; int ret; - DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -1924,9 +1913,6 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; - - DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle); - if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; @@ -2013,9 +1999,6 @@ int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_buffer_object *bo; int ret; - DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n", - req->handle, req->pin); - if (!dev->bm.initialized) { DRM_ERROR("Buffer 
object manager is not initialized.\n"); return -EINVAL; @@ -2445,9 +2428,6 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n", - arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4)); - if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2502,8 +2482,6 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type); - if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2541,8 +2519,6 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type); - if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; @@ -2565,8 +2541,6 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; - DRM_DEBUG("drm_mm_unlock_ioctl\n"); - if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; -- cgit v1.2.3 From 0d1926d36e59ddfc34d8c9c0cdef10b71a49ecf1 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 4 Oct 2007 10:14:41 +0200 Subject: Revert "Replace NO_MOVE/NO_EVICT flags to buffer objects with an ioctl to set pinning." This reverts cf2d569daca6954d11a796f4d110148ae2e0c827 commit. 
--- linux-core/drm_bo.c | 174 ++++++++++++++--------------------------------- linux-core/drm_drv.c | 1 - linux-core/drm_objects.h | 3 +- 3 files changed, 52 insertions(+), 126 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 6e1de80b..7335d258 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -80,7 +80,8 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo) DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); - if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) { + if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) + || bo->mem.mem_type != bo->pinned_mem_type) { man = &bo->dev->bm.man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); } else { @@ -638,8 +639,7 @@ int drm_fence_buffer_objects(struct drm_device *dev, mutex_lock(&entry->mutex); mutex_lock(&dev->struct_mutex); list_del_init(l); - if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED && - entry->fence_class == fence_class) { + if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) { count++; if (entry->fence) drm_fence_usage_deref_locked(&entry->fence); @@ -761,7 +761,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->pinned); + BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); @@ -929,6 +929,18 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, DRM_ERROR("User buffers are not supported yet\n"); return -EINVAL; } + if (bo->type == drm_bo_type_fake && + !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { + DRM_ERROR("Fake buffers must be pinned.\n"); + return -EINVAL; + } + + if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { + DRM_ERROR + ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " + "processes\n"); + return -EPERM; + } new_props = new_mask & (DRM_BO_FLAG_EXE | 
DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -1372,12 +1384,6 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, return ret; } - if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) { - DRM_ERROR("Attempt to validate pinned buffer into different memory " - "type\n"); - return -EINVAL; - } - /* * We're switching command submission mechanism, * or cannot simply rely on the hardware serializing for us. @@ -1418,6 +1424,37 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo, } } + /* + * Pinned buffers. + */ + + if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { + bo->pinned_mem_type = bo->mem.mem_type; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->pinned_lru); + drm_bo_add_to_pinned_lru(bo); + + if (bo->pinned_node != bo->mem.mm_node) { + if (bo->pinned_node != NULL) + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = bo->mem.mm_node; + } + + mutex_unlock(&dev->struct_mutex); + + } else if (bo->pinned_node != NULL) { + + mutex_lock(&dev->struct_mutex); + + if (bo->pinned_node != bo->mem.mm_node) + drm_mm_put_block(bo->pinned_node); + + list_del_init(&bo->pinned_lru); + bo->pinned_node = NULL; + mutex_unlock(&dev->struct_mutex); + + } + /* * We might need to add a TTM. */ @@ -1517,10 +1554,6 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, } EXPORT_SYMBOL(drm_bo_handle_validate); -/** - * Fills out the generic buffer object ioctl reply with the information for - * the BO with id of handle. - */ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, struct drm_bo_info_rep *rep) { @@ -1926,112 +1959,6 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * return 0; } -/** - * Pins or unpins the given buffer object in the given memory area. - * - * Pinned buffers will not be evicted from or move within their memory area. - * Must be called with the hardware lock held for pinning. 
- */ -static int -drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, - int pin) -{ - int ret = 0; - - mutex_lock(&bo->mutex); - if (bo->pinned == pin) { - mutex_unlock(&bo->mutex); - return 0; - } - - if (pin) { - ret = drm_bo_wait_unfenced(bo, 0, 0); - if (ret) { - mutex_unlock(&bo->mutex); - return ret; - } - - /* Validate the buffer into its pinned location, with no pending - * fence. - */ - ret = drm_buffer_object_validate(bo, bo->fence_class, 0, 0); - if (ret) { - mutex_unlock(&bo->mutex); - return ret; - } - - /* Add our buffer to the pinned list */ - bo->pinned_mem_type = bo->mem.mem_type; - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->pinned_lru); - drm_bo_add_to_pinned_lru(bo); - - if (bo->pinned_node != bo->mem.mm_node) { - if (bo->pinned_node != NULL) - drm_mm_put_block(bo->pinned_node); - bo->pinned_node = bo->mem.mm_node; - } - - mutex_unlock(&dev->struct_mutex); - - } else { - mutex_lock(&dev->struct_mutex); - - /* Remove our buffer from the pinned list */ - if (bo->pinned_node != bo->mem.mm_node) - drm_mm_put_block(bo->pinned_node); - - list_del_init(&bo->pinned_lru); - bo->pinned_node = NULL; - mutex_unlock(&dev->struct_mutex); - } - bo->pinned = pin; - mutex_unlock(&bo->mutex); - return 0; -} - -int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_bo_set_pin_arg *arg = data; - struct drm_bo_set_pin_req *req = &arg->d.req; - struct drm_bo_info_rep *rep = &arg->d.rep; - struct drm_buffer_object *bo; - int ret; - - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized.\n"); - return -EINVAL; - } - - if (req->pin < 0 || req->pin > 1) { - DRM_ERROR("Bad arguments to set_pin\n"); - return -EINVAL; - } - - if (req->pin) - LOCK_TEST_WITH_RETURN(dev, file_priv); - - mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(file_priv, req->handle, 1); - mutex_unlock(&dev->struct_mutex); - if (!bo) { - return -EINVAL; - } - - ret = 
drm_bo_set_pin(dev, bo, req->pin); - if (ret) { - drm_bo_usage_deref_unlocked(&bo); - return ret; - } - - drm_bo_fill_rep_arg(bo, rep); - drm_bo_usage_deref_unlocked(&bo); - - return 0; -} - - /** *Clean the unfenced list and put on regular LRU. *This is part of the memory manager cleanup and should only be @@ -2112,10 +2039,11 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo, mutex_unlock(&dev->struct_mutex); } - if (bo->pinned) { - DRM_ERROR("A pinned buffer was present at " + if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { + DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " "cleanup. Removing flag and evicting.\n"); - bo->pinned = 0; + bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; + bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; } if (bo->mem.mem_type == mem_type) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 0fca3a27..80e56938 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -145,7 +145,6 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_BO_SET_PIN, drm_bo_set_pin_ioctl, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 726ccbe2..91378b8a 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -377,7 +377,6 @@ struct drm_buffer_object { unsigned long num_pages; /* For pinned buffers */ - int pinned; struct drm_mm_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; @@ -472,7 +471,7 @@ extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct d extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int 
drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); + extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -- cgit v1.2.3 From 086c058a417317491320129d2cbeb68d1cfcfefe Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Oct 2007 10:55:21 +0200 Subject: Remove the op ioctl, and replace it with a setuser ioctl. Remove need for lock for now. May create races when we clean memory areas or on takedown. Needs to be fixed. Really do a validate on buffer creation in order to avoid problems with fixed memory buffers. --- linux-core/drm_bo.c | 146 +++++++++++++---------------------------------- linux-core/drm_drv.c | 2 +- linux-core/drm_objects.h | 4 +- 3 files changed, 41 insertions(+), 111 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 7335d258..bdeefec2 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -929,11 +929,6 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, DRM_ERROR("User buffers are not supported yet\n"); return -EINVAL; } - if (bo->type == drm_bo_type_fake && - !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { - DRM_ERROR("Fake buffers must be pinned.\n"); - return -EINVAL; - } if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { DRM_ERROR @@ -942,6 +937,12 @@ static int drm_bo_new_mask(struct drm_buffer_object * bo, return -EPERM; } + if ((new_mask & DRM_BO_FLAG_NO_MOVE)) { + DRM_ERROR + ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); + return -EPERM; + } + new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -1160,11 +1161,9 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, return -EINVAL; mutex_lock(&bo->mutex); - if 
(!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) { - ret = drm_bo_wait_unfenced(bo, no_wait, 0); - if (ret) - goto out; - } + ret = drm_bo_wait_unfenced(bo, no_wait, 0); + if (ret) + goto out; /* * If this returns true, we are currently unmapped. @@ -1542,6 +1541,7 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, return -EINVAL; } + ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, no_wait, rep); @@ -1663,8 +1663,10 @@ int drm_buffer_object_create(struct drm_device *dev, bo->mem.page_alignment = page_alignment; bo->buffer_start = buffer_start; bo->priv_flags = 0; - bo->mem.flags = 0ULL; - bo->mem.mask = 0ULL; + bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | + DRM_BO_FLAG_MAPPABLE; + bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | + DRM_BO_FLAG_MAPPABLE; atomic_inc(&bm->count); ret = drm_bo_new_mask(bo, mask, hint); @@ -1678,18 +1680,8 @@ int drm_buffer_object_create(struct drm_device *dev, if (ret) goto out_err; } -#if 0 - bo->fence_class = 0; - ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type); - if (ret) { - DRM_ERROR("Driver did not support given buffer permissions\n"); - goto out_err; - } - ret = drm_bo_add_ttm(bo); -#else ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK); -#endif if (ret) goto out_err; @@ -1705,6 +1697,7 @@ int drm_buffer_object_create(struct drm_device *dev, } EXPORT_SYMBOL(drm_buffer_object_create); + static int drm_bo_add_user_object(struct drm_file *file_priv, struct drm_buffer_object *bo, int shareable) { @@ -1726,86 +1719,6 @@ static int drm_bo_add_user_object(struct drm_file *file_priv, return ret; } -static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) -{ - LOCK_TEST_WITH_RETURN(dev, file_priv); - return 0; -} - -int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ - struct drm_bo_op_arg curarg; - struct drm_bo_op_arg *arg = data; - struct drm_bo_op_req *req = &arg->d.req; - 
struct drm_bo_info_rep rep; - unsigned long next = 0; - void __user *curuserarg = NULL; - int ret; - - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized.\n"); - return -EINVAL; - } - - do { - if (next != 0) { - curuserarg = (void __user *)next; - if (copy_from_user(&curarg, curuserarg, - sizeof(curarg)) != 0) - return -EFAULT; - arg = &curarg; - } - - if (arg->handled) { - next = arg->next; - continue; - } - req = &arg->d.req; - ret = 0; - switch (req->op) { - case drm_bo_validate: - ret = drm_bo_lock_test(dev, file_priv); - if (ret) - break; - ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, - req->bo_req.fence_class, - req->bo_req.flags, - req->bo_req.mask, - req->bo_req.hint, - &rep, NULL); - break; - case drm_bo_fence: - ret = -EINVAL; - DRM_ERROR("Function is not implemented yet.\n"); - break; - case drm_bo_ref_fence: - ret = -EINVAL; - DRM_ERROR("Function is not implemented yet.\n"); - break; - default: - ret = -EINVAL; - } - next = arg->next; - - /* - * A signal interrupted us. Make sure the ioctl is restartable. 
- */ - - if (ret == -EAGAIN) - return -EAGAIN; - - arg->handled = 1; - arg->d.rep.ret = ret; - arg->d.rep.bo_info = rep; - if (arg != data) { - if (copy_to_user(curuserarg, &curarg, - sizeof(curarg)) != 0) - return -EFAULT; - } - } while (next != 0); - return 0; -} - int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_bo_create_arg *arg = data; @@ -1821,11 +1734,6 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } -#if 0 - ret = drm_bo_lock_test(dev, file_priv); - if (ret) - goto out; -#endif ret = drm_buffer_object_create(file_priv->head->dev, req->size, drm_bo_type_dc, req->mask, @@ -1849,6 +1757,30 @@ out: return ret; } +int drm_bo_setstatus_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + int ret; + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class, + req->flags, + req->mask, + req->hint | DRM_BO_HINT_DONT_FENCE, + rep, NULL); + + if (ret) + return ret; + + return 0; +} + int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_bo_map_wait_idle_arg *arg = data; diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 80e56938..9c867f1b 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -142,7 +142,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH), + 
DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), }; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 91378b8a..4d1ec993 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -470,9 +470,7 @@ extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); - - +extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -- cgit v1.2.3 From bb29ba7fa77659be284c365ebfb2f740491e8506 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Oct 2007 10:57:12 +0200 Subject: Only allow creator to change shared buffer mask. --- linux-core/drm_bo.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index bdeefec2..d40be07f 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1540,8 +1540,16 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, if (!bo) { return -EINVAL; } - + /* + * Only allow creator to change shared buffer mask. 
+ */ + + if (bo->base.owner != file_priv) { + flags = 0x0; + mask = 0x0; + } + ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, no_wait, rep); -- cgit v1.2.3 From cf2d1bba5513ae38d8efbaf50251fc136ed1d414 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 19 Oct 2007 16:24:36 +0200 Subject: Remove the clean_unfenced function. Change the restriction that non-creators can't change the buffer flags to non-creators can't change EVICT and NO_MOVE flags. --- linux-core/drm_bo.c | 65 ++++------------------------------------------------- 1 file changed, 4 insertions(+), 61 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index d40be07f..89c014e3 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1545,10 +1545,9 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, * Only allow creator to change shared buffer mask. */ - if (bo->base.owner != file_priv) { - flags = 0x0; - mask = 0x0; - } + if (bo->base.owner != file_priv) + mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); + ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, no_wait, rep); @@ -1899,60 +1898,6 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * return 0; } -/** - *Clean the unfenced list and put on regular LRU. - *This is part of the memory manager cleanup and should only be - *called with the DRI lock held. - *Call dev->struct_sem locked. - */ - -static void drm_bo_clean_unfenced(struct drm_device *dev) -{ - struct drm_buffer_manager *bm = &dev->bm; - struct list_head *head, *list; - struct drm_buffer_object *entry; - struct drm_fence_object *fence; - - head = &bm->unfenced; - - if (list_empty(head)) - return; - - DRM_ERROR("Clean unfenced\n"); - - if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) { - - /* - * Fixme: Should really wait here. 
- */ - } - - if (fence) - drm_fence_usage_deref_locked(&fence); - - if (list_empty(head)) - return; - - DRM_ERROR("Really clean unfenced\n"); - - list = head->next; - while(list != head) { - prefetch(list->next); - entry = list_entry(list, struct drm_buffer_object, lru); - - atomic_inc(&entry->usage); - mutex_unlock(&dev->struct_mutex); - mutex_lock(&entry->mutex); - mutex_lock(&dev->struct_mutex); - - list_del(&entry->lru); - DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - drm_bo_add_to_lru(entry); - mutex_unlock(&entry->mutex); - list = head->next; - } -} - static int drm_bo_leave_list(struct drm_buffer_object * bo, uint32_t mem_type, int free_pinned, int allow_errors) @@ -2103,8 +2048,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) ret = 0; if (mem_type > 0) { - - drm_bo_clean_unfenced(dev); + BUG_ON(!list_empty(&bm->unfenced)); drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); @@ -2142,7 +2086,6 @@ static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) return 0; } - drm_bo_clean_unfenced(dev); ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0); if (ret) return ret; -- cgit v1.2.3 From 733ff568346e8fe40e9790f21f8b7efc659d5d12 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 19 Oct 2007 16:28:47 +0200 Subject: No fence_class argument on drmBOSetStatus since it's not associated with a particular command submission. 
--- linux-core/drm_bo.c | 13 +++++++++---- linux-core/drm_objects.h | 1 + 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 89c014e3..cc4743dc 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1524,7 +1524,9 @@ EXPORT_SYMBOL(drm_bo_do_validate); int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, uint32_t fence_class, - uint64_t flags, uint64_t mask, uint32_t hint, + uint64_t flags, uint64_t mask, + uint32_t hint, + int use_old_fence_class, struct drm_bo_info_rep * rep, struct drm_buffer_object **bo_rep) { @@ -1537,10 +1539,12 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); - if (!bo) { + if (!bo) return -EINVAL; - } - + + if (use_old_fence_class) + fence_class = bo->fence_class; + /* * Only allow creator to change shared buffer mask. */ @@ -1780,6 +1784,7 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev, req->flags, req->mask, req->hint | DRM_BO_HINT_DONT_FENCE, + 1, rep, NULL); if (ret) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 4d1ec993..f153b84a 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -510,6 +510,7 @@ extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, uint32_t fence_class, uint64_t flags, uint64_t mask, uint32_t hint, + int use_old_fence_class, struct drm_bo_info_rep * rep, struct drm_buffer_object **bo_rep); extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv, -- cgit v1.2.3 From c0e3537e77f1765001f665f93e5349ccd0f1d092 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 19 Oct 2007 16:44:12 +0200 Subject: Some comment updates pending removal of the init mutex. 
--- linux-core/drm_bo.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index cc4743dc..35ac8a0a 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1072,13 +1072,6 @@ static int drm_bo_check_unfenced(struct drm_buffer_object * bo) /* * Wait until a buffer, scheduled to be fenced moves off the unfenced list. * Until then, we cannot really do anything with it except delete it. - * The unfenced list is a PITA, and the operations - * 1) validating - * 2) submitting commands - * 3) fencing - * Should really be an atomic operation. - * We now "solve" this problem by keeping - * the buffer "unfenced" after validating, but before fencing. */ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, @@ -2144,8 +2137,10 @@ int drm_bo_init_mm(struct drm_device * dev, EXPORT_SYMBOL(drm_bo_init_mm); /* - * This is called from lastclose, so we don't need to bother about - * any clients still running when we set the initialized flag to zero. + * This function is intended to be called on drm driver unload. + * If you decide to call it from lastclose, you must protect the call + * from a potentially racing drm_bo_driver_init in firstopen. + * (This may happen on X server restart). */ int drm_bo_driver_finish(struct drm_device * dev) @@ -2199,6 +2194,13 @@ int drm_bo_driver_finish(struct drm_device * dev) return ret; } +/* + * This function is intended to be called on drm driver load. + * If you decide to call it from firstopen, you must protect the call + * from a potentially racing drm_bo_driver_finish in lastclose. + * (This may happen on X server restart). 
+ */ + int drm_bo_driver_init(struct drm_device * dev) { struct drm_bo_driver *driver = dev->driver->bo_driver; -- cgit v1.2.3 From 48b5eaf303b60077faed09db77785d7a544ac335 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sat, 20 Oct 2007 16:49:43 +0200 Subject: Simple replacement for hardware lock in some cases. Fix i915 since last commit. --- linux-core/Makefile.kernel | 2 +- linux-core/drm_bo.c | 65 ++++++++++++++++++------------ linux-core/drm_bo_lock2.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++ linux-core/drm_objects.h | 27 ++++++++++++- linux-core/drm_stub.c | 1 - 5 files changed, 165 insertions(+), 29 deletions(-) create mode 100644 linux-core/drm_bo_lock2.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 715454bc..86b225f3 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -13,7 +13,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ drm_memory_debug.o ati_pcigart.o drm_sman.o \ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ - drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o + drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock2.o tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 35ac8a0a..a2a0291d 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1768,11 +1768,16 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev, struct drm_bo_info_req *req = &arg->d.req; struct drm_bo_info_rep *rep = &arg->d.rep; int ret; + if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); return -EINVAL; } + ret = drm_bo_read_lock(&dev->bm.bm_lock); + if (ret) + return ret; + ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class, req->flags, req->mask, @@ -1780,6 +1785,7 @@ int 
drm_bo_setstatus_ioctl(struct drm_device *dev, 1, rep, NULL); + (void) drm_bo_read_unlock(&dev->bm.bm_lock); if (ret) return ret; @@ -1898,7 +1904,8 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file * static int drm_bo_leave_list(struct drm_buffer_object * bo, uint32_t mem_type, - int free_pinned, int allow_errors) + int free_pinned, + int allow_errors) { struct drm_device *dev = bo->dev; int ret = 0; @@ -2150,7 +2157,6 @@ int drm_bo_driver_finish(struct drm_device * dev) unsigned i = DRM_BO_MEM_TYPES; struct drm_mem_type_manager *man; - mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); if (!bm->initialized) @@ -2190,7 +2196,6 @@ int drm_bo_driver_finish(struct drm_device * dev) } out: mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); return ret; } @@ -2207,7 +2212,7 @@ int drm_bo_driver_init(struct drm_device * dev) struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; - mutex_lock(&dev->bm.init_mutex); + drm_bo_init_lock(&bm->bm_lock); mutex_lock(&dev->struct_mutex); if (!driver) goto out_unlock; @@ -2233,7 +2238,6 @@ int drm_bo_driver_init(struct drm_device * dev) INIT_LIST_HEAD(&bm->ddestroy); out_unlock: mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); return ret; } @@ -2252,6 +2256,10 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ } ret = -EINVAL; + ret = drm_bo_write_lock(&bm->bm_lock, file_priv); + if (ret) + return ret; + if (arg->magic != DRM_BO_INIT_MAGIC) { DRM_ERROR("You are using an old libdrm that is not compatible with\n" "\tthe kernel DRM module. 
Please upgrade your libdrm.\n"); @@ -2271,7 +2279,6 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -EINVAL; } - mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); if (!bm->initialized) { DRM_ERROR("DRM memory manager was not initialized.\n"); @@ -2286,7 +2293,8 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ out: mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); + (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); + if (ret) return ret; @@ -2305,8 +2313,10 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f return -EINVAL; } - LOCK_TEST_WITH_RETURN(dev, file_priv); - mutex_lock(&dev->bm.init_mutex); + ret = drm_bo_write_lock(&bm->bm_lock, file_priv); + if (ret) + return ret; + mutex_lock(&dev->struct_mutex); ret = -EINVAL; if (!bm->initialized) { @@ -2324,7 +2334,8 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f } out: mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); + (void) drm_bo_write_unlock(&bm->bm_lock, file_priv); + if (ret) return ret; @@ -2342,20 +2353,28 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -EINVAL; } - LOCK_TEST_WITH_RETURN(dev, file_priv); - mutex_lock(&dev->bm.init_mutex); + if (arg->lock_unlock_bm) { + ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv); + if (ret) + return ret; + } + mutex_lock(&dev->struct_mutex); ret = drm_bo_lock_mm(dev, arg->mem_type); mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); - if (ret) + if (ret) { + (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); return ret; + } return 0; } -int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +int drm_mm_unlock_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file_priv) { + struct drm_mm_type_arg *arg = data; struct drm_bo_driver *driver = 
dev->driver->bo_driver; int ret; @@ -2364,16 +2383,12 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *fil return -EINVAL; } - LOCK_TEST_WITH_RETURN(dev, file_priv); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = 0; - - mutex_unlock(&dev->struct_mutex); - mutex_unlock(&dev->bm.init_mutex); - if (ret) - return ret; - + if (arg->lock_unlock_bm) { + ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); + if (ret) + return ret; + } + return 0; } diff --git a/linux-core/drm_bo_lock2.c b/linux-core/drm_bo_lock2.c new file mode 100644 index 00000000..73e58bc0 --- /dev/null +++ b/linux-core/drm_bo_lock2.c @@ -0,0 +1,99 @@ +#include "drmP.h" + +void drm_bo_init_lock(struct drm_bo_lock *lock) +{ + DRM_INIT_WAITQUEUE(&lock->queue); + atomic_set(&lock->write_lock_pending, 0); + atomic_set(&lock->readers, 0); + +} + +void drm_bo_read_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_add_negative(-1, &lock->readers) == 0)) + BUG(); + if (atomic_read(&lock->readers) == 0) + wake_up_interruptible(&lock->queue); +} + +int drm_bo_read_lock(struct drm_bo_lock *lock) +{ + while( unlikely(atomic_read(&lock->write_lock_pending) != 0)) { + int ret; + ret = wait_event_interruptible + (lock->queue, + atomic_read(&lock->write_lock_pending) == 0); + if (ret) + return -EAGAIN; + } + + while( unlikely (!atomic_add_unless(&lock->readers, 1, -1))) { + int ret; + ret = wait_event_interruptible + (lock->queue, + atomic_add_unless(&lock->readers, 1, -1)); + if (ret) + return -EAGAIN; + } + return 0; +} + +static int __drm_bo_write_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) + return -EINVAL; + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1)) + return -EINVAL; + wake_up_interruptible(&lock->queue); + return 0; +} + +static void drm_bo_write_lock_remove(struct drm_file *file_priv, + struct drm_user_object *item) +{ + struct drm_bo_lock *lock = + 
container_of(item, struct drm_bo_lock, base); + int ret; + + ret = __drm_bo_write_unlock(lock); + BUG_ON(ret); +} + +int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + int ret = 0; + struct drm_device *dev; + + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) + return -EINVAL; + + while(unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { + ret = wait_event_interruptible + (lock->queue, + atomic_cmpxchg(&lock->readers, 0, -1) == 0); + + if (ret) { + atomic_set(&lock->write_lock_pending, 0); + wake_up_interruptible(&lock->queue); + return -EAGAIN; + } + } + + dev = file_priv->head->dev; + mutex_lock(&dev->struct_mutex); + ret = drm_add_user_object(file_priv, &lock->base, 0); + lock->base.remove = &drm_bo_write_lock_remove; + lock->base.type = drm_lock_type; + if (ret) + (void) __drm_bo_write_unlock(lock); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + + +int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + return drm_user_object_unref(file_priv, lock->base.hash.key, + drm_lock_type); +} diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index f153b84a..0b937dc0 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -43,6 +43,7 @@ struct drm_bo_mem_reg; enum drm_object_type { drm_fence_type, drm_buffer_type, + drm_lock_type, /* * Add other user space object types here. 
*/ @@ -414,6 +415,13 @@ struct drm_mem_type_manager { void *io_addr; }; +struct drm_bo_lock { + struct drm_user_object base; + wait_queue_head_t queue; + atomic_t write_lock_pending; + atomic_t readers; +}; + #define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ #define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ #define _DRM_FLAG_MEMTYPE_CACHED 0x00000004 /* Cached binding */ @@ -423,8 +431,8 @@ struct drm_mem_type_manager { #define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ struct drm_buffer_manager { - struct mutex init_mutex; - struct mutex evict_mutex; + struct drm_bo_lock bm_lock; + struct mutex evict_mutex; int nice_mode; int initialized; struct drm_file *last_to_validate; @@ -603,6 +611,21 @@ extern void drm_regs_init(struct drm_reg_manager *manager, const void *), void (*reg_destroy)(struct drm_reg *)); +/* + * drm_bo_lock.c + * Simple replacement for the hardware lock on buffer manager init and clean. + */ + + +extern void drm_bo_init_lock(struct drm_bo_lock *lock); +extern void drm_bo_read_unlock(struct drm_bo_lock *lock); +extern int drm_bo_read_lock(struct drm_bo_lock *lock); +extern int drm_bo_write_lock(struct drm_bo_lock *lock, + struct drm_file *file_priv); + +extern int drm_bo_write_unlock(struct drm_bo_lock *lock, + struct drm_file *file_priv); + #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ BUG_ON(!mutex_is_locked(_mutex) || \ diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 07ea91e0..9e140ac2 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -72,7 +72,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, init_timer(&dev->timer); mutex_init(&dev->struct_mutex); mutex_init(&dev->ctxlist_mutex); - mutex_init(&dev->bm.init_mutex); mutex_init(&dev->bm.evict_mutex); idr_init(&dev->drw_idr); -- cgit v1.2.3 From 3b19b50cb5cd31e60eb03e99dd1109b6d0f5b8a3 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 21 Oct 2007 
12:20:56 +0200 Subject: Remove the need for the hardware lock in the buffer manager. Add interface entry cleaning a memory type without touching NO_EVICT buffers. --- linux-core/Makefile.kernel | 2 +- linux-core/drm_bo.c | 11 ++- linux-core/drm_bo_lock.c | 178 +++++++++++++++++++++++++++++++++++++++++++++ linux-core/drm_compat.c | 3 + linux-core/drm_vm.c | 11 ++- 5 files changed, 199 insertions(+), 6 deletions(-) create mode 100644 linux-core/drm_bo_lock.c (limited to 'linux-core') diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 86b225f3..79136431 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -13,7 +13,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ drm_memory_debug.o ati_pcigart.o drm_sman.o \ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ - drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock2.o + drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index a2a0291d..e6eb6320 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -2255,11 +2255,11 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -EINVAL; } - ret = -EINVAL; ret = drm_bo_write_lock(&bm->bm_lock, file_priv); if (ret) return ret; + ret = -EINVAL; if (arg->magic != DRM_BO_INIT_MAGIC) { DRM_ERROR("You are using an old libdrm that is not compatible with\n" "\tthe kernel DRM module. 
Please upgrade your libdrm.\n"); @@ -2353,7 +2353,12 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ return -EINVAL; } - if (arg->lock_unlock_bm) { + if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) { + DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n"); + return -EINVAL; + } + + if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv); if (ret) return ret; @@ -2383,7 +2388,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev, return -EINVAL; } - if (arg->lock_unlock_bm) { + if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) { ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv); if (ret) return ret; diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c new file mode 100644 index 00000000..e5a86826 --- /dev/null +++ b/linux-core/drm_bo_lock.c @@ -0,0 +1,178 @@ +/************************************************************************** + * + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +/* + * This file implements a simple replacement for the buffer manager use + * of the heavyweight hardware lock. + * The lock is a read-write lock. Taking it in read mode is fast, and + * intended for in-kernel use only. + * Taking it in write mode is slow. + * + * The write mode is used only when there is a need to block all + * user-space processes from allocating a + * new memory area. + * Typical use in write mode is X server VT switching, and it's allowed + * to leave kernel space with the write lock held. If a user-space process + * dies while having the write-lock, it will be released during the file + * descriptor release. + * + * The read lock is typically placed at the start of an IOCTL- or + * user-space callable function that may end up allocating a memory area. + * This includes setstatus, super-ioctls and no_pfn; the latter may move + * unmappable regions to mappable. It's a bug to leave kernel space with the + * read lock held. + * + * Both read- and write lock taking is interruptible for low signal-delivery + * latency. The locking functions will return -EAGAIN if interrupted by a + * signal. + * + * Locking order: The lock should be taken BEFORE any kernel mutexes + * or spinlocks. 
+ */ + +#include "drmP.h" + +void drm_bo_init_lock(struct drm_bo_lock *lock) +{ + DRM_INIT_WAITQUEUE(&lock->queue); + atomic_set(&lock->write_lock_pending, 0); + atomic_set(&lock->readers, 0); +} + +void drm_bo_read_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_add_negative(-1, &lock->readers))) + BUG(); + if (atomic_read(&lock->readers) == 0) + wake_up_interruptible(&lock->queue); +} + +EXPORT_SYMBOL(drm_bo_read_unlock); + +int drm_bo_read_lock(struct drm_bo_lock *lock) +{ + while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) { + int ret; + ret = wait_event_interruptible + (lock->queue, atomic_read(&lock->write_lock_pending) == 0); + if (ret) + return -EAGAIN; + } + + while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) { + int ret; + ret = wait_event_interruptible + (lock->queue, atomic_add_unless(&lock->readers, 1, -1)); + if (ret) + return -EAGAIN; + } + return 0; +} + +EXPORT_SYMBOL(drm_bo_read_lock); + +static int __drm_bo_write_unlock(struct drm_bo_lock *lock) +{ + if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) + return -EINVAL; + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1)) + return -EINVAL; + wake_up_interruptible(&lock->queue); + return 0; +} + +static void drm_bo_write_lock_remove(struct drm_file *file_priv, + struct drm_user_object *item) +{ + struct drm_bo_lock *lock = container_of(item, struct drm_bo_lock, base); + int ret; + + ret = __drm_bo_write_unlock(lock); + BUG_ON(ret); +} + +int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + int ret = 0; + struct drm_device *dev; + + if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) { + return -EINVAL; + } + + while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { + ret = wait_event_interruptible + (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0); + + if (ret) { + atomic_set(&lock->write_lock_pending, 0); + wake_up_interruptible(&lock->queue); + return -EAGAIN; + } + } + + /* + * Add 
a dummy user-object, the destructor of which will + * make sure the lock is released if the client dies + * while holding it. + */ + + dev = file_priv->head->dev; + mutex_lock(&dev->struct_mutex); + ret = drm_add_user_object(file_priv, &lock->base, 0); + lock->base.remove = &drm_bo_write_lock_remove; + lock->base.type = drm_lock_type; + if (ret) { + (void)__drm_bo_write_unlock(lock); + } + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv) +{ + struct drm_device *dev = file_priv->head->dev; + struct drm_ref_object *ro; + + mutex_lock(&dev->struct_mutex); + + if (lock->base.owner != file_priv) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ro = drm_lookup_ref_object(file_priv, &lock->base, _DRM_REF_USE); + BUG_ON(!ro); + drm_remove_ref_object(file_priv, ro); + lock->base.owner = NULL; + + mutex_unlock(&dev->struct_mutex); + return 0; +} diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index e51aedb7..ae44e500 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -212,6 +212,8 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, unsigned long bus_offset; unsigned long bus_size; + dev = bo->dev; + while(drm_bo_read_lock(&dev->bm.bm_lock)); mutex_lock(&bo->mutex); @@ -289,6 +291,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, data->type = VM_FAULT_OOM; out_unlock: mutex_unlock(&bo->mutex); + drm_bo_read_unlock(&dev->bm.bm_lock); return NULL; } diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index c4e790ef..d2554f31 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -728,10 +728,17 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, if (address > vma->vm_end) return NOPFN_SIGBUS; - err = mutex_lock_interruptible(&bo->mutex); + dev = bo->dev; + err = drm_bo_read_lock(&dev->bm.bm_lock); if (err) return NOPFN_REFAULT; + err = mutex_lock_interruptible(&bo->mutex); + if (err) { + 
drm_bo_read_unlock(&dev->bm.bm_lock); + return NOPFN_REFAULT; + } + err = drm_bo_wait(bo, 0, 0, 0); if (err) { ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT; @@ -754,7 +761,6 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, } } - dev = bo->dev; err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, &bus_size); @@ -792,6 +798,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, } out_unlock: mutex_unlock(&bo->mutex); + drm_bo_read_unlock(&dev->bm.bm_lock); return ret; } #endif -- cgit v1.2.3 From 4ebe7471cbfdd6afa33485ea9ec55812da38445f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Sun, 21 Oct 2007 12:31:00 +0200 Subject: Disable i915 accelerated blit copy moves for now until we can guarantee that it doesn't clash with the X server. --- linux-core/i915_buffer.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index f3ba7ce5..f81def8f 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -121,6 +121,8 @@ uint32_t i915_evict_mask(struct drm_buffer_object *bo) } } +#if 0 /* See comment below */ + static void i915_emit_copy_blit(struct drm_device * dev, uint32_t src_offset, uint32_t dst_offset, @@ -221,6 +223,16 @@ out_cleanup: return ret; } +#endif + +/* + * Disable i915_move_flip for now, since we can't guarantee that the hardware lock + * is held here. To re-enable we need to make sure either + * a) The X server is using DRM to submit commands to the ring, or + * b) DRM can use the HP ring for these blits. This means i915 needs to implement + * a new ring submission mechanism and fence class. 
+ */ + int i915_move(struct drm_buffer_object * bo, int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { @@ -229,10 +241,10 @@ int i915_move(struct drm_buffer_object * bo, if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { - if (i915_move_flip(bo, evict, no_wait, new_mem)) + if (0 /*i915_move_flip(bo, evict, no_wait, new_mem)*/) return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } else { - if (i915_move_blit(bo, evict, no_wait, new_mem)) + if (0 /*i915_move_blit(bo, evict, no_wait, new_mem)*/) return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); } return 0; -- cgit v1.2.3 From 824330d0e652e0bab1851437f120c7e76feee832 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 22 Oct 2007 19:09:36 +0200 Subject: Don't clobber the unfenced list with DONT_FENCE operations. --- linux-core/drm_bo.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index e6eb6320..9598e353 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -1299,10 +1299,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, mutex_lock(&bm->evict_mutex); mutex_lock(&dev->struct_mutex); - list_del(&bo->lru); - list_add_tail(&bo->lru, &bm->unfenced); - DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, - _DRM_BO_FLAG_UNFENCED); + list_del_init(&bo->lru); mutex_unlock(&dev->struct_mutex); /* @@ -1322,10 +1319,6 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, drm_mm_put_block(mem.mm_node); mem.mm_node = NULL; } - DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED); - DRM_WAKEUP(&bo->event_queue); - list_del(&bo->lru); - drm_bo_add_to_lru(bo); mutex_unlock(&dev->struct_mutex); } -- cgit v1.2.3 From 3d4b32e91647f61712d54a46f0a173deff46e6b4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Mon, 22 Oct 2007 19:16:39 +0200 
Subject: Remove duplicate file. --- linux-core/drm_bo_lock2.c | 99 ----------------------------------------------- 1 file changed, 99 deletions(-) delete mode 100644 linux-core/drm_bo_lock2.c (limited to 'linux-core') diff --git a/linux-core/drm_bo_lock2.c b/linux-core/drm_bo_lock2.c deleted file mode 100644 index 73e58bc0..00000000 --- a/linux-core/drm_bo_lock2.c +++ /dev/null @@ -1,99 +0,0 @@ -#include "drmP.h" - -void drm_bo_init_lock(struct drm_bo_lock *lock) -{ - DRM_INIT_WAITQUEUE(&lock->queue); - atomic_set(&lock->write_lock_pending, 0); - atomic_set(&lock->readers, 0); - -} - -void drm_bo_read_unlock(struct drm_bo_lock *lock) -{ - if (unlikely(atomic_add_negative(-1, &lock->readers) == 0)) - BUG(); - if (atomic_read(&lock->readers) == 0) - wake_up_interruptible(&lock->queue); -} - -int drm_bo_read_lock(struct drm_bo_lock *lock) -{ - while( unlikely(atomic_read(&lock->write_lock_pending) != 0)) { - int ret; - ret = wait_event_interruptible - (lock->queue, - atomic_read(&lock->write_lock_pending) == 0); - if (ret) - return -EAGAIN; - } - - while( unlikely (!atomic_add_unless(&lock->readers, 1, -1))) { - int ret; - ret = wait_event_interruptible - (lock->queue, - atomic_add_unless(&lock->readers, 1, -1)); - if (ret) - return -EAGAIN; - } - return 0; -} - -static int __drm_bo_write_unlock(struct drm_bo_lock *lock) -{ - if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1)) - return -EINVAL; - if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1)) - return -EINVAL; - wake_up_interruptible(&lock->queue); - return 0; -} - -static void drm_bo_write_lock_remove(struct drm_file *file_priv, - struct drm_user_object *item) -{ - struct drm_bo_lock *lock = - container_of(item, struct drm_bo_lock, base); - int ret; - - ret = __drm_bo_write_unlock(lock); - BUG_ON(ret); -} - -int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv) -{ - int ret = 0; - struct drm_device *dev; - - if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) 
!= 0)) - return -EINVAL; - - while(unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) { - ret = wait_event_interruptible - (lock->queue, - atomic_cmpxchg(&lock->readers, 0, -1) == 0); - - if (ret) { - atomic_set(&lock->write_lock_pending, 0); - wake_up_interruptible(&lock->queue); - return -EAGAIN; - } - } - - dev = file_priv->head->dev; - mutex_lock(&dev->struct_mutex); - ret = drm_add_user_object(file_priv, &lock->base, 0); - lock->base.remove = &drm_bo_write_lock_remove; - lock->base.type = drm_lock_type; - if (ret) - (void) __drm_bo_write_unlock(lock); - mutex_unlock(&dev->struct_mutex); - - return ret; -} - - -int drm_bo_write_unlock(struct drm_bo_lock *lock, struct drm_file *file_priv) -{ - return drm_user_object_unref(file_priv, lock->base.hash.key, - drm_lock_type); -} -- cgit v1.2.3 From 07abc3384e24356d1302459e2e5c4699ed7b0072 Mon Sep 17 00:00:00 2001 From: Roel Kluin <12o3l@tiscali.nl> Date: Thu, 25 Oct 2007 10:24:55 +1000 Subject: missing mutex unlock bug --- linux-core/sis_mm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'linux-core') diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 7e162a8e..9222b08d 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -133,6 +133,7 @@ static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv, dev_priv->agp_initialized)) { DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); + mutex_unlock(&dev->struct_mutex); return -EINVAL; } -- cgit v1.2.3 From b5cad27e05ad3666be8ccdf71e10d743efa5849e Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 25 Oct 2007 09:49:33 +0200 Subject: Fix buffer object flag / mask checking. 
--- linux-core/drm_bo.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 9598e353..039873ca 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -921,37 +921,37 @@ int drm_bo_mem_space(struct drm_buffer_object * bo, EXPORT_SYMBOL(drm_bo_mem_space); static int drm_bo_new_mask(struct drm_buffer_object * bo, - uint64_t new_mask, uint32_t hint) + uint64_t new_flags, uint64_t used_mask) { uint32_t new_props; if (bo->type == drm_bo_type_user) { - DRM_ERROR("User buffers are not supported yet\n"); + DRM_ERROR("User buffers are not supported yet.\n"); return -EINVAL; } - if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { + if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { DRM_ERROR ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " - "processes\n"); + "processes.\n"); return -EPERM; } - if ((new_mask & DRM_BO_FLAG_NO_MOVE)) { + if ((new_flags & DRM_BO_FLAG_NO_MOVE)) { DRM_ERROR ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); return -EPERM; } - new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | - DRM_BO_FLAG_READ); + new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | + DRM_BO_FLAG_READ); if (!new_props) { DRM_ERROR("Invalid buffer object rwx properties\n"); return -EINVAL; } - bo->mem.mask = new_mask; + bo->mem.mask = new_flags; return 0; } @@ -1490,7 +1490,7 @@ int drm_bo_do_validate(struct drm_buffer_object *bo, DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask); - ret = drm_bo_new_mask(bo, flags, hint); + ret = drm_bo_new_mask(bo, flags, mask); if (ret) goto out; -- cgit v1.2.3 From 11f3e5e53f8fc4de90d1c289e0ba218ddfca23dc Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 25 Oct 2007 10:12:21 +0200 Subject: Buffer manager: Implement a version check IOCTL for drivers that don't use drmMMInit from user-space. Remove the minor check from the kernel code. 
That's really up to the driver. Bump major. --- linux-core/drm_bo.c | 21 +++++++++++++-------- linux-core/drm_drv.c | 1 + linux-core/drm_objects.h | 1 + 3 files changed, 15 insertions(+), 8 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 039873ca..8d1e2f56 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -2260,17 +2260,10 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_ } if (arg->major != DRM_BO_INIT_MAJOR) { DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" - "\tversion don't match. Got %d, expected %d,\n", + "\tversion don't match. Got %d, expected %d.\n", arg->major, DRM_BO_INIT_MAJOR); return -EINVAL; } - if (arg->minor > DRM_BO_INIT_MINOR) { - DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n" - "\tlibdrm buffer object interface version is %d.%d.\n" - "\tkernel DRM buffer object interface version is %d.%d\n", - arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); - return -EINVAL; - } mutex_lock(&dev->struct_mutex); if (!bm->initialized) { @@ -2535,3 +2528,15 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) return 0; } + +int drm_bo_version_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data; + + arg->major = DRM_BO_INIT_MAJOR; + arg->minor = DRM_BO_INIT_MINOR; + arg->patchlevel = DRM_BO_INIT_PATCH; + + return 0; +} diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 9c867f1b..330566bb 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -145,6 +145,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_BO_SETSTATUS, drm_bo_setstatus_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_VERSION, drm_bo_version_ioctl, 0), }; #define 
DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 0b937dc0..702ece56 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -483,6 +483,7 @@ extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, -- cgit v1.2.3 From b9d9c30474238ac8ba4899a19fe4a97e9376f6c4 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 25 Oct 2007 10:29:15 +0200 Subject: Tighten permissions on some buffer manager ioctls. Set bo init minor to 0. Add the version function to header. 
--- linux-core/drm_drv.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 330566bb..fe2b1200 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -123,10 +123,14 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, + DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), -- cgit v1.2.3 From 1681189e11b5a00ae72a55de932146ea37f7afd9 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Fri, 26 Oct 2007 10:25:57 +0200 Subject: Buffer flags and masks are 64-bit. don't mask off the high dword. 
Signed-off-by: Thomas Hellstrom --- linux-core/drm_bo.c | 12 ++++++------ linux-core/drm_bo_move.c | 12 ++++++------ linux-core/drm_objects.h | 3 ++- 3 files changed, 14 insertions(+), 13 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 8d1e2f56..16203c77 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -54,9 +54,9 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo); static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo); static void drm_bo_unmap_virtual(struct drm_buffer_object * bo); -static inline uint32_t drm_bo_type_flags(unsigned type) +static inline uint64_t drm_bo_type_flags(unsigned type) { - return (1 << (24 + type)); + return (1ULL << (24 + type)); } /* @@ -785,10 +785,10 @@ static int drm_bo_mem_force_space(struct drm_device * dev, static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, uint32_t mem_type, - uint32_t mask, uint32_t * res_mask) + uint64_t mask, uint32_t * res_mask) { - uint32_t cur_flags = drm_bo_type_flags(mem_type); - uint32_t flag_diff; + uint64_t cur_flags = drm_bo_type_flags(mem_type); + uint64_t flag_diff; if (man->flags & _DRM_FLAG_MEMTYPE_CACHED) cur_flags |= DRM_BO_FLAG_CACHED; @@ -1271,7 +1271,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. 
*/ -int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, +int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags, int no_wait, int move_unfenced) { struct drm_device *dev = bo->dev; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 2a35d45b..7c86c4aa 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -53,8 +53,8 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo, { struct drm_ttm *ttm = bo->ttm; struct drm_bo_mem_reg *old_mem = &bo->mem; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; int ret; if (old_mem->mem_type == DRM_BO_MEM_TT) { @@ -210,8 +210,8 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo, void *old_iomap; void *new_iomap; int ret; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; unsigned long i; unsigned long page; unsigned long add = 0; @@ -333,8 +333,8 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; struct drm_bo_mem_reg *old_mem = &bo->mem; int ret; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; struct drm_buffer_object *old_obj; if (bo->fence) diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 702ece56..8b14ac6f 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -511,7 +511,8 @@ extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signa int no_wait); extern int drm_bo_mem_space(struct drm_buffer_object * bo, struct drm_bo_mem_reg * mem, int no_wait); -extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, +extern int drm_bo_move_buffer(struct drm_buffer_object * 
bo, + uint64_t new_mem_flags, int no_wait, int move_unfenced); extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type); extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, -- cgit v1.2.3 From 6707ab862656d766a4c78b85e5584a29d2434126 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 26 Oct 2007 16:08:54 -0700 Subject: update DRM sysfs support Make DRM devices use real Linux devices instead of class devices, which are going away. While we're at it, clean up some of the interfaces to take struct drm_device * or struct device * and use the global drm_class where needed instead of passing it around. --- linux-core/drmP.h | 10 ++-- linux-core/drm_drv.c | 4 +- linux-core/drm_stub.c | 7 +-- linux-core/drm_sysfs.c | 151 +++++++++++++++++++++++++++++++++---------------- 4 files changed, 113 insertions(+), 59 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index d0ab2c94..82a3a23c 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -619,6 +619,8 @@ struct drm_driver { void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); + int (*suspend) (struct drm_device *); + int (*resume) (struct drm_device *); int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); void (*dma_ready) (struct drm_device *); int (*dma_quiescent) (struct drm_device *); @@ -697,6 +699,7 @@ struct drm_head { * may contain multiple heads. 
*/ struct drm_device { + struct device dev; /**< Linux device */ char *unique; /**< Unique identifier: e.g., busid */ int unique_len; /**< Length of unique field */ char *devname; /**< For /proc/interrupts */ @@ -1163,10 +1166,9 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* sysfs support (drm_sysfs.c) */ struct drm_sysfs_class; extern struct class *drm_sysfs_create(struct module *owner, char *name); -extern void drm_sysfs_destroy(struct class *cs); -extern struct class_device *drm_sysfs_device_add(struct class *cs, - struct drm_head * head); -extern void drm_sysfs_device_remove(struct class_device *class_dev); +extern void drm_sysfs_destroy(void); +extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head * head); +extern void drm_sysfs_device_remove(struct drm_device *dev); /* * Basic memory manager support (drm_mm.c) diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index fe2b1200..47d17651 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -519,7 +519,7 @@ static int __init drm_core_init(void) CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE); return 0; err_p3: - drm_sysfs_destroy(drm_class); + drm_sysfs_destroy(); err_p2: unregister_chrdev(DRM_MAJOR, "drm"); drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB); @@ -530,7 +530,7 @@ err_p1: static void __exit drm_core_exit(void) { remove_proc_entry("dri", NULL); - drm_sysfs_destroy(drm_class); + drm_sysfs_destroy(); unregister_chrdev(DRM_MAJOR, "drm"); diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 9e140ac2..1d88d375 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -183,11 +183,10 @@ static int drm_get_head(struct drm_device * dev, struct drm_head * head) goto err_g1; } - head->dev_class = drm_sysfs_device_add(drm_class, head); - if (IS_ERR(head->dev_class)) { + ret = drm_sysfs_device_add(dev, head); + if (ret) { printk(KERN_ERR "DRM: Error sysfs_device_add.\n"); - ret = 
PTR_ERR(head->dev_class); goto err_g2; } *heads = head; @@ -316,7 +315,7 @@ int drm_put_head(struct drm_head * head) DRM_DEBUG("release secondary minor %d\n", minor); drm_proc_cleanup(minor, drm_proc_root, head->dev_root); - drm_sysfs_device_remove(head->dev_class); + drm_sysfs_device_remove(head->dev); *head = (struct drm_head){.dev = NULL}; diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index cf4349b0..6f8623ce 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -19,6 +19,45 @@ #include "drm_core.h" #include "drmP.h" +#define to_drm_device(d) container_of(d, struct drm_device, dev) + +/** + * drm_sysfs_suspend - DRM class suspend hook + * @dev: Linux device to suspend + * @state: power state to enter + * + * Just figures out what the actual struct drm_device associated with + * @dev is and calls its suspend hook, if present. + */ +static int drm_sysfs_suspend(struct device *dev, pm_message_t state) +{ + struct drm_device *drm_dev = to_drm_device(dev); + + printk(KERN_ERR "%s\n", __FUNCTION__); + + if (drm_dev->driver->suspend) + return drm_dev->driver->suspend(drm_dev); + + return 0; +} + +/** + * drm_sysfs_resume - DRM class resume hook + * @dev: Linux device to resume + * + * Just figures out what the actual struct drm_device associated with + * @dev is and calls its resume hook, if present. + */ +static int drm_sysfs_resume(struct device *dev) +{ + struct drm_device *drm_dev = to_drm_device(dev); + + if (drm_dev->driver->resume) + return drm_dev->driver->resume(drm_dev); + + return 0; +} + /* Display the version of drm_core. This doesn't work right in current design */ static ssize_t version_show(struct class *dev, char *buf) { @@ -33,7 +72,7 @@ static CLASS_ATTR(version, S_IRUGO, version_show, NULL); * @owner: pointer to the module that is to "own" this struct drm_sysfs_class * @name: pointer to a string for the name of this class. 
* - * This is used to create a struct drm_sysfs_class pointer that can then be used + * This is used to create DRM class pointer that can then be used * in calls to drm_sysfs_device_add(). * * Note, the pointer created here is to be destroyed when finished by making a @@ -50,6 +89,9 @@ struct class *drm_sysfs_create(struct module *owner, char *name) goto err_out; } + class->suspend = drm_sysfs_suspend; + class->resume = drm_sysfs_resume; + err = class_create_file(class, &class_attr_version); if (err) goto err_out_class; @@ -63,94 +105,105 @@ err_out: } /** - * drm_sysfs_destroy - destroys a struct drm_sysfs_class structure - * @cs: pointer to the struct drm_sysfs_class that is to be destroyed + * drm_sysfs_destroy - destroys DRM class * - * Note, the pointer to be destroyed must have been created with a call to - * drm_sysfs_create(). + * Destroy the DRM device class. */ -void drm_sysfs_destroy(struct class *class) +void drm_sysfs_destroy(void) { - if ((class == NULL) || (IS_ERR(class))) + if ((drm_class == NULL) || (IS_ERR(drm_class))) return; - - class_remove_file(class, &class_attr_version); - class_destroy(class); + class_remove_file(drm_class, &class_attr_version); + class_destroy(drm_class); } -static ssize_t show_dri(struct class_device *class_device, char *buf) +static ssize_t show_dri(struct device *device, struct device_attribute *attr, + char *buf) { - struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev; + struct drm_device *dev = to_drm_device(device); if (dev->driver->dri_library_name) return dev->driver->dri_library_name(dev, buf); return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name); } -static struct class_device_attribute class_device_attrs[] = { +static struct device_attribute device_attrs[] = { __ATTR(dri_library_name, S_IRUGO, show_dri, NULL), }; +/** + * drm_sysfs_device_release - do nothing + * @dev: Linux device + * + * Normally, this would free the DRM device associated with @dev, along + * 
with cleaning up any other stuff. But we do that in the DRM core, so + * this function can just return and hope that the core does its job. + */ +static void drm_sysfs_device_release(struct device *dev) +{ + return; +} + /** * drm_sysfs_device_add - adds a class device to sysfs for a character driver - * @cs: pointer to the struct class that this device should be registered to. - * @dev: the dev_t for the device to be added. - * @device: a pointer to a struct device that is assiociated with this class device. - * @fmt: string for the class device's name + * @dev: DRM device to be added + * @head: DRM head in question * - * A struct class_device will be created in sysfs, registered to the specified - * class. A "dev" file will be created, showing the dev_t for the device. The - * pointer to the struct class_device will be returned from the call. Any further - * sysfs files that might be required can be created using this pointer. - * Note: the struct class passed to this function must have previously been - * created with a call to drm_sysfs_create(). + * Add a DRM device to the DRM's device model class. We use @dev's PCI device + * as the parent for the Linux device, and make sure it has a file containing + * the driver we're using (for userspace compatibility). */ -struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head) +int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head) { - struct class_device *class_dev; - int i, j, err; - - class_dev = class_device_create(cs, NULL, - MKDEV(DRM_MAJOR, head->minor), - &(head->dev->pdev)->dev, - "card%d", head->minor); - if (IS_ERR(class_dev)) { - err = PTR_ERR(class_dev); + int err; + int i, j; + + dev->dev.parent = &dev->pdev->dev; + dev->dev.class = drm_class; + dev->dev.release = drm_sysfs_device_release; + /* + * This will actually add the major:minor file so that udev + * will create the device node. We don't want to do that just + * yet... 
+ */ + /* dev->dev.devt = head->device; */ + snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor); + + err = device_register(&dev->dev); + if (err) { + DRM_ERROR("device add failed: %d\n", err); goto err_out; } - class_set_devdata(class_dev, head); - - for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) { - err = class_device_create_file(class_dev, - &class_device_attrs[i]); + for (i = 0; i < ARRAY_SIZE(device_attrs); i++) { + err = device_create_file(&dev->dev, &device_attrs[i]); if (err) goto err_out_files; } - return class_dev; + return 0; err_out_files: if (i > 0) for (j = 0; j < i; j++) - class_device_remove_file(class_dev, - &class_device_attrs[i]); - class_device_unregister(class_dev); + device_remove_file(&dev->dev, &device_attrs[i]); + device_unregister(&dev->dev); err_out: - return ERR_PTR(err); + + return err; } /** - * drm_sysfs_device_remove - removes a class device that was created with drm_sysfs_device_add() - * @dev: the dev_t of the device that was previously registered. + * drm_sysfs_device_remove - remove DRM device + * @dev: DRM device to remove * * This call unregisters and cleans up a class device that was created with a * call to drm_sysfs_device_add() */ -void drm_sysfs_device_remove(struct class_device *class_dev) +void drm_sysfs_device_remove(struct drm_device *dev) { int i; - for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) - class_device_remove_file(class_dev, &class_device_attrs[i]); - class_device_unregister(class_dev); + for (i = 0; i < ARRAY_SIZE(device_attrs); i++) + device_remove_file(&dev->dev, &device_attrs[i]); + device_unregister(&dev->dev); } -- cgit v1.2.3 From 1e2a2bababf3fbaa0a665983856761c2284dba30 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 26 Oct 2007 16:10:02 -0700 Subject: i915: suspend/resume support Add suspend/resume support to the i915 driver. 
Moves some of the initialization into the driver load routine, and fixes up places where we assumed no dev_private existed in some of the cleanup paths. This allows us to suspend/resume properly even if X isn't running. --- linux-core/i915_drv.c | 455 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 455 insertions(+) (limited to 'linux-core') diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index e337e1d2..f34d218c 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -69,6 +69,458 @@ static struct drm_bo_driver i915_bo_driver = { }; #endif +enum pipe { + PIPE_A = 0, + PIPE_B, +}; + +static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (pipe == PIPE_A) + return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); + else + return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); +} + +static void i915_save_palette(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); + u32 *array; + int i; + + if (!i915_pipe_enabled(dev, pipe)) + return; + + if (pipe == PIPE_A) + array = dev_priv->save_palette_a; + else + array = dev_priv->save_palette_b; + + for(i = 0; i < 256; i++) + array[i] = I915_READ(reg + (i << 2)); +} + +static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long reg = (pipe == PIPE_A ? 
PALETTE_A : PALETTE_B); + u32 *array; + int i; + + if (!i915_pipe_enabled(dev, pipe)) + return; + + if (pipe == PIPE_A) + array = dev_priv->save_palette_a; + else + array = dev_priv->save_palette_b; + + for(i = 0; i < 256; i++) + I915_WRITE(reg + (i << 2), array[i]); +} + +static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg) +{ + outb(reg, index_port); + return inb(data_port); +} + +static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable) +{ + inb(st01); + outb(palette_enable | reg, VGA_AR_INDEX); + return inb(VGA_AR_DATA_READ); +} + +static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable) +{ + inb(st01); + outb(palette_enable | reg, VGA_AR_INDEX); + outb(val, VGA_AR_DATA_WRITE); +} + +static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val) +{ + outb(reg, index_port); + outb(val, data_port); +} + +static void i915_save_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + u16 cr_index, cr_data, st01; + + /* VGA color palette registers */ + dev_priv->saveDACMASK = inb(VGA_DACMASK); + /* DACCRX automatically increments during read */ + outb(0, VGA_DACRX); + /* Read 3 bytes of color data from each index */ + for (i = 0; i < 256 * 3; i++) + dev_priv->saveDACDATA[i] = inb(VGA_DACDATA); + + /* MSR bits */ + dev_priv->saveMSR = inb(VGA_MSR_READ); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { + cr_index = VGA_CR_INDEX_CGA; + cr_data = VGA_CR_DATA_CGA; + st01 = VGA_ST01_CGA; + } else { + cr_index = VGA_CR_INDEX_MDA; + cr_data = VGA_CR_DATA_MDA; + st01 = VGA_ST01_MDA; + } + + /* CRT controller regs */ + i915_write_indexed(cr_index, cr_data, 0x11, + i915_read_indexed(cr_index, cr_data, 0x11) & + (~0x80)); + for (i = 0; i < 0x24; i++) + dev_priv->saveCR[i] = + i915_read_indexed(cr_index, cr_data, i); + /* Make sure we don't turn off CR group 0 writes */ + dev_priv->saveCR[0x11] &= ~0x80; + + /* Attribute controller registers */ + inb(st01); + dev_priv->saveAR_INDEX = 
inb(VGA_AR_INDEX); + for (i = 0; i < 20; i++) + dev_priv->saveAR[i] = i915_read_ar(st01, i, 0); + inb(st01); + outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX); + + /* Graphics controller registers */ + for (i = 0; i < 9; i++) + dev_priv->saveGR[i] = + i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i); + + dev_priv->saveGR[0x10] = + i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10); + dev_priv->saveGR[0x11] = + i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11); + dev_priv->saveGR[0x18] = + i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18); + + /* Sequencer registers */ + for (i = 0; i < 8; i++) + dev_priv->saveSR[i] = + i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i); +} + +static void i915_restore_vga(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + u16 cr_index, cr_data, st01; + + /* MSR bits */ + outb(dev_priv->saveMSR, VGA_MSR_WRITE); + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { + cr_index = VGA_CR_INDEX_CGA; + cr_data = VGA_CR_DATA_CGA; + st01 = VGA_ST01_CGA; + } else { + cr_index = VGA_CR_INDEX_MDA; + cr_data = VGA_CR_DATA_MDA; + st01 = VGA_ST01_MDA; + } + + /* Sequencer registers, don't write SR07 */ + for (i = 0; i < 7; i++) + i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i, + dev_priv->saveSR[i]); + + /* CRT controller regs */ + /* Enable CR group 0 writes */ + i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); + for (i = 0; i < 0x24; i++) + i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]); + + /* Graphics controller regs */ + for (i = 0; i < 9; i++) + i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i, + dev_priv->saveGR[i]); + + i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10, + dev_priv->saveGR[0x10]); + i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11, + dev_priv->saveGR[0x11]); + i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18, + dev_priv->saveGR[0x18]); + + /* Attribute controller registers */ + for (i = 0; i < 20; i++) + i915_write_ar(st01, i, dev_priv->saveAR[i], 0); + 
inb(st01); /* switch back to index mode */ + outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX); + + /* VGA color palette registers */ + outb(dev_priv->saveDACMASK, VGA_DACMASK); + /* DACCRX automatically increments during read */ + outb(0, VGA_DACWX); + /* Read 3 bytes of color data from each index */ + for (i = 0; i < 256 * 3; i++) + outb(dev_priv->saveDACDATA[i], VGA_DACDATA); + +} + +static int i915_suspend(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + if (!dev || !dev_priv) { + printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); + printk(KERN_ERR "DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + pci_save_state(dev->pdev); + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); + + /* Pipe & plane A info */ + dev_priv->savePIPEACONF = I915_READ(PIPEACONF); + dev_priv->savePIPEASRC = I915_READ(PIPEASRC); + dev_priv->saveFPA0 = I915_READ(FPA0); + dev_priv->saveFPA1 = I915_READ(FPA1); + dev_priv->saveDPLL_A = I915_READ(DPLL_A); + if (IS_I965G(dev)) + dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); + dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); + dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); + dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); + dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); + dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); + dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); + dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); + + dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); + dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); + dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); + dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); + dev_priv->saveDSPABASE = I915_READ(DSPABASE); + if (IS_I965G(dev)) { + dev_priv->saveDSPASURF = I915_READ(DSPASURF); + dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); + } + i915_save_palette(dev, PIPE_A); + + /* Pipe & plane B info */ + dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); + dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); + dev_priv->saveFPB0 = 
I915_READ(FPB0); + dev_priv->saveFPB1 = I915_READ(FPB1); + dev_priv->saveDPLL_B = I915_READ(DPLL_B); + if (IS_I965G(dev)) + dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); + dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); + dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); + dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); + dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); + dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); + dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); + dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); + + dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); + dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); + dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); + dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); + dev_priv->saveDSPBBASE = I915_READ(DSPBBASE); + if (IS_I965GM(dev)) { + dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); + dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); + } + i915_save_palette(dev, PIPE_B); + + /* CRT state */ + dev_priv->saveADPA = I915_READ(ADPA); + + /* LVDS state */ + dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); + dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); + dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); + if (IS_I965G(dev)) + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + if (IS_MOBILE(dev) && !IS_I830(dev)) + dev_priv->saveLVDS = I915_READ(LVDS); + if (!IS_I830(dev) && !IS_845G(dev)) + dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); + dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON); + dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF); + dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE); + + /* FIXME: save TV & SDVO state */ + + /* FBC state */ + dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); + dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); + dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); + dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); + + /* VGA state */ + dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0); + dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1); + 
dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV); + dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); + + /* Scratch space */ + for (i = 0; i < 16; i++) { + dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2)); + dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); + } + for (i = 0; i < 3; i++) + dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); + + i915_save_vga(dev); + + /* Shut down the device */ + pci_disable_device(dev->pdev); + pci_set_power_state(dev->pdev, PCI_D3hot); + + return 0; +} + +static int i915_resume(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + pci_set_power_state(dev->pdev, PCI_D0); + pci_restore_state(dev->pdev); + if (pci_enable_device(dev->pdev)) + return -1; + + pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); + + /* Pipe & plane A info */ + /* Prime the clock */ + if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & + ~DPLL_VCO_ENABLE); + udelay(150); + } + I915_WRITE(FPA0, dev_priv->saveFPA0); + I915_WRITE(FPA1, dev_priv->saveFPA1); + /* Actually enable it */ + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); + udelay(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); + udelay(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); + I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); + I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); + I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); + I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); + I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); + I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); + + /* Restore plane info */ + I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); + I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); + I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); + I915_WRITE(DSPABASE, dev_priv->saveDSPABASE); + I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); + I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); + 
} + I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); + i915_restore_palette(dev, PIPE_A); + /* Enable the plane */ + I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); + I915_WRITE(DSPABASE, I915_READ(DSPABASE)); + + /* Pipe & plane B info */ + if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & + ~DPLL_VCO_ENABLE); + udelay(150); + } + I915_WRITE(FPB0, dev_priv->saveFPB0); + I915_WRITE(FPB1, dev_priv->saveFPB1); + /* Actually enable it */ + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); + udelay(150); + if (IS_I965G(dev)) + I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); + udelay(150); + + /* Restore mode */ + I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); + I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); + I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); + I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); + I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); + I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); + I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); + + /* Restore plane info */ + I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); + I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); + I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); + I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE); + I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); + if (IS_I965G(dev)) { + I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); + I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); + } + I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); + i915_restore_palette(dev, PIPE_A); + /* Enable the plane */ + I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); + I915_WRITE(DSPBBASE, I915_READ(DSPBBASE)); + + /* CRT state */ + I915_WRITE(ADPA, dev_priv->saveADPA); + + /* LVDS state */ + if (IS_I965G(dev)) + I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); + if (IS_MOBILE(dev) && !IS_I830(dev)) + I915_WRITE(LVDS, dev_priv->saveLVDS); + if (!IS_I830(dev) && !IS_845G(dev)) + I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); + + I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); + 
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); + I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON); + I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF); + I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE); + I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); + + /* FIXME: restore TV & SDVO state */ + + /* FBC info */ + I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); + I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); + I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); + I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); + + /* VGA state */ + I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); + I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0); + I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1); + I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV); + udelay(150); + + for (i = 0; i < 16; i++) { + I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]); + I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); + } + for (i = 0; i < 3; i++) + I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); + + i915_restore_vga(dev); + + return 0; +} + static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { /* don't use mtrr's here, the Xserver or user space app should @@ -79,9 +531,12 @@ static struct drm_driver driver = { DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2, .load = i915_driver_load, + .unload = i915_driver_unload, .firstopen = i915_driver_firstopen, .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, + .suspend = i915_suspend, + .resume = i915_resume, .device_is_agp = i915_driver_device_is_agp, .vblank_wait = i915_driver_vblank_wait, .vblank_wait2 = i915_driver_vblank_wait2, -- cgit v1.2.3 From ff5889f8316e0c16112f114c1c8f57645b8dc54f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristian=20H=C3=B8gsberg?= Date: Mon, 29 Oct 2007 19:32:32 -0400 Subject: Move struct drm_drawable_info out of public header file. 
--- linux-core/drmP.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'linux-core') diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 82a3a23c..ac3ca4d2 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -586,6 +586,15 @@ struct drm_vbl_sig { struct task_struct *task; }; +/** + * Drawable information. + */ +struct drm_drawable_info { + unsigned int num_rects; + struct drm_clip_rect *rects; +}; + + /* location of GART table */ #define DRM_ATI_GART_MAIN 1 #define DRM_ATI_GART_FB 2 -- cgit v1.2.3 From 50dec29c800a6e980a01be38190e44a0ba7916b5 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 30 Oct 2007 17:51:59 +1000 Subject: drm/i915: add driver cache flush entry point Use clflush on Intel hardware to flush cached objects. --- linux-core/drm_objects.h | 1 + linux-core/drm_ttm.c | 7 +++++-- linux-core/i915_buffer.c | 33 +++++++++++++++++++++++++++++++++ linux-core/i915_drv.c | 1 + 4 files changed, 40 insertions(+), 2 deletions(-) (limited to 'linux-core') diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index 8b14ac6f..cea811eb 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -464,6 +464,7 @@ struct drm_bo_driver { uint32_t(*evict_mask) (struct drm_buffer_object *bo); int (*move) (struct drm_buffer_object * bo, int evict, int no_wait, struct drm_bo_mem_reg * new_mem); + void (*ttm_cache_flush)(struct drm_ttm *ttm); }; /* diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 33bbe1d4..df9e7e44 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -207,6 +207,7 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index) } return p; } +EXPORT_SYMBOL(drm_ttm_get_page); int drm_ttm_populate(struct drm_ttm * ttm) { @@ -311,7 +312,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm) int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) { - + struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver; int ret = 0; struct drm_ttm_backend *be; @@ -328,7 +329,9 @@ int 
drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) { drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); - } + } else if ((bo_mem->flags & DRM_BO_FLAG_CACHED) && + bo_driver->ttm_cache_flush) + bo_driver->ttm_cache_flush(ttm); if ((ret = be->func->bind(be, bo_mem))) { ttm->state = ttm_evicted; diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index f81def8f..bbc7e1db 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -249,3 +249,36 @@ int i915_move(struct drm_buffer_object * bo, } return 0; } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) +static inline void clflush(volatile void *__p) +{ + asm volatile("clflush %0" : "+m" (*(char __force *)__p)); +} +#endif + +static inline void drm_cache_flush_addr(void *virt) +{ + int i; + + for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) + clflush(virt+i); +} + +static inline void drm_cache_flush_page(struct page *p) +{ + drm_cache_flush_addr(page_address(p)); +} + +void i915_flush_ttm(struct drm_ttm *ttm) +{ + int i; + + if (!ttm) + return; + + DRM_MEMORYBARRIER(); + for (i = ttm->num_pages-1; i >= 0; i--) + drm_cache_flush_page(drm_ttm_get_page(ttm, i)); + DRM_MEMORYBARRIER(); +} diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index f34d218c..124db68f 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -66,6 +66,7 @@ static struct drm_bo_driver i915_bo_driver = { .init_mem_type = i915_init_mem_type, .evict_mask = i915_evict_mask, .move = i915_move, + .ttm_cache_flush = i915_flush_ttm, }; #endif -- cgit v1.2.3