author    Dave Airlie <airlied@linux.ie>    2007-11-05 12:42:22 +1000
committer Dave Airlie <airlied@linux.ie>    2007-11-05 12:42:22 +1000
commit    7f6bf84c238a1859ffd409c0ef1f1ca7eb5e6e72 (patch)
tree      df368e11dd564ce38ffcb108e7b836aa33621838 /linux-core
parent    3664de73955aafe912318c91717ff9ecc1027af2 (diff)
drm: remove lots of spurious whitespace.
Kernel "cleanfile" script run.
Diffstat (limited to 'linux-core')
-rw-r--r-- linux-core/drm_agpsupport.c | 10
-rw-r--r-- linux-core/drm_bo.c | 34
-rw-r--r-- linux-core/drm_bo_lock.c | 12
-rw-r--r-- linux-core/drm_bo_move.c | 20
-rw-r--r-- linux-core/drm_bufs.c | 16
-rw-r--r-- linux-core/drm_compat.c | 58
-rw-r--r-- linux-core/drm_compat.h | 24
-rw-r--r-- linux-core/drm_context.c | 4
-rw-r--r-- linux-core/drm_drv.c | 22
-rw-r--r-- linux-core/drm_fence.c | 6
-rw-r--r-- linux-core/drm_fops.c | 1
-rw-r--r-- linux-core/drm_hashtab.c | 2
-rw-r--r-- linux-core/drm_hashtab.h | 1
-rw-r--r-- linux-core/drm_mm.c | 4
-rw-r--r-- linux-core/drm_objects.h | 6
-rw-r--r-- linux-core/drm_os_linux.h | 5
-rw-r--r-- linux-core/drm_scatter.c | 2
-rw-r--r-- linux-core/drm_stub.c | 2
-rw-r--r-- linux-core/ffb_drv.h | 4
-rw-r--r-- linux-core/i810_dma.c | 6
-rw-r--r-- linux-core/i810_drv.h | 48
-rw-r--r-- linux-core/i915_buffer.c | 18
-rw-r--r-- linux-core/i915_compat.c | 2
-rw-r--r-- linux-core/i915_drv.c | 14
-rw-r--r-- linux-core/i915_fence.c | 16
-rw-r--r-- linux-core/i915_ioc32.c | 16
-rw-r--r-- linux-core/mga_drv.c | 2
-rw-r--r-- linux-core/mga_ioc32.c | 32
-rw-r--r-- linux-core/nouveau_buffer.c | 3
-rw-r--r-- linux-core/nouveau_drv.c | 2
-rw-r--r-- linux-core/nouveau_fence.c | 1
-rw-r--r-- linux-core/nouveau_sgdma.c | 3
-rw-r--r-- linux-core/r128_ioc32.c | 6
-rw-r--r-- linux-core/radeon_drv.c | 2
-rw-r--r-- linux-core/sis_mm.c | 2
-rw-r--r-- linux-core/via_buffer.c | 14
-rw-r--r-- linux-core/via_dmablit.c | 178
-rw-r--r-- linux-core/via_dmablit.h | 84
-rw-r--r-- linux-core/via_mm.c | 2
-rw-r--r-- linux-core/xgi_cmdlist.c | 14
-rw-r--r-- linux-core/xgi_drv.c | 6
-rw-r--r-- linux-core/xgi_fb.c | 6
-rw-r--r-- linux-core/xgi_fence.c | 2
-rw-r--r-- linux-core/xgi_ioc32.c | 2
-rw-r--r-- linux-core/xgi_misc.c | 12
-rw-r--r-- linux-core/xgi_misc.h | 2
-rw-r--r-- linux-core/xgi_regs.h | 2
47 files changed, 361 insertions(+), 369 deletions(-)
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 7c50f411..08ea7c48 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -506,7 +506,7 @@ static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) {
static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages,
struct page **pages) {
- struct drm_agp_ttm_backend *agp_be =
+ struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
@@ -562,7 +562,7 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {
- struct drm_agp_ttm_backend *agp_be =
+ struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_DEBUG("drm_agp_unbind_ttm\n");
@@ -574,7 +574,7 @@ static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {
static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) {
- struct drm_agp_ttm_backend *agp_be =
+ struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_AGP_MEM *mem = agp_be->mem;
@@ -604,7 +604,7 @@ static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) {
}
}
-static struct drm_ttm_backend_func agp_ttm_backend =
+static struct drm_ttm_backend_func agp_ttm_backend =
{
.needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
.populate = drm_agp_populate,
@@ -637,7 +637,7 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
return NULL;
}
-
+
agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
if (!agp_be)
return NULL;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 4f95f236..4cdf8891 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -1537,7 +1537,7 @@ EXPORT_SYMBOL(drm_bo_do_validate);
int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
uint32_t fence_class,
- uint64_t flags, uint64_t mask,
+ uint64_t flags, uint64_t mask,
uint32_t hint,
int use_old_fence_class,
struct drm_bo_info_rep * rep,
@@ -1552,7 +1552,7 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
- if (!bo)
+ if (!bo)
return -EINVAL;
if (use_old_fence_class)
@@ -1562,10 +1562,10 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
* Only allow creator to change shared buffer mask.
*/
- if (bo->base.owner != file_priv)
+ if (bo->base.owner != file_priv)
mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-
+
ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
no_wait, rep);
@@ -1764,14 +1764,14 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
req->buffer_start, &entry);
if (ret)
goto out;
-
+
ret = drm_bo_add_user_object(file_priv, entry,
req->mask & DRM_BO_FLAG_SHAREABLE);
if (ret) {
drm_bo_usage_deref_unlocked(&entry);
goto out;
}
-
+
mutex_lock(&entry->mutex);
drm_bo_fill_rep_arg(entry, rep);
mutex_unlock(&entry->mutex);
@@ -1780,7 +1780,7 @@ out:
return ret;
}
-int drm_bo_setstatus_ioctl(struct drm_device *dev,
+int drm_bo_setstatus_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_bo_map_wait_idle_arg *arg = data;
@@ -1861,7 +1861,7 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *
drm_buffer_type, &uo);
if (ret)
return ret;
-
+
ret = drm_bo_handle_info(file_priv, req->handle, rep);
if (ret)
return ret;
@@ -2165,7 +2165,7 @@ EXPORT_SYMBOL(drm_bo_init_mm);
/*
* This function is intended to be called on drm driver unload.
* If you decide to call it from lastclose, you must protect the call
- * from a potentially racing drm_bo_driver_init in firstopen.
+ * from a potentially racing drm_bo_driver_init in firstopen.
* (This may happen on X server restart).
*/
@@ -2229,7 +2229,7 @@ out:
/*
* This function is intended to be called on drm driver load.
* If you decide to call it from firstopen, you must protect the call
- * from a potentially racing drm_bo_driver_finish in lastclose.
+ * from a potentially racing drm_bo_driver_finish in lastclose.
* (This may happen on X server restart).
*/
@@ -2390,13 +2390,13 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
return -EINVAL;
}
-
+
if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
if (ret)
return ret;
}
-
+
mutex_lock(&dev->struct_mutex);
ret = drm_bo_lock_mm(dev, arg->mem_type);
mutex_unlock(&dev->struct_mutex);
@@ -2408,8 +2408,8 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
return 0;
}
-int drm_mm_unlock_ioctl(struct drm_device *dev,
- void *data,
+int drm_mm_unlock_ioctl(struct drm_device *dev,
+ void *data,
struct drm_file *file_priv)
{
struct drm_mm_type_arg *arg = data;
@@ -2426,7 +2426,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev,
if (ret)
return ret;
}
-
+
return 0;
}
@@ -2580,11 +2580,11 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
return 0;
}
-int drm_bo_version_ioctl(struct drm_device *dev, void *data,
+int drm_bo_version_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
-
+
arg->major = DRM_BO_INIT_MAJOR;
arg->minor = DRM_BO_INIT_MINOR;
arg->patchlevel = DRM_BO_INIT_PATCH;
diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c
index e5a86826..46318f6a 100644
--- a/linux-core/drm_bo_lock.c
+++ b/linux-core/drm_bo_lock.c
@@ -31,19 +31,19 @@
/*
* This file implements a simple replacement for the buffer manager use
* of the heavyweight hardware lock.
- * The lock is a read-write lock. Taking it in read mode is fast, and
+ * The lock is a read-write lock. Taking it in read mode is fast, and
* intended for in-kernel use only.
* Taking it in write mode is slow.
*
- * The write mode is used only when there is a need to block all
- * user-space processes from allocating a
+ * The write mode is used only when there is a need to block all
+ * user-space processes from allocating a
* new memory area.
* Typical use in write mode is X server VT switching, and it's allowed
* to leave kernel space with the write lock held. If a user-space process
* dies while having the write-lock, it will be released during the file
* descriptor release.
*
- * The read lock is typically placed at the start of an IOCTL- or
+ * The read lock is typically placed at the start of an IOCTL- or
* user-space callable function that may end up allocating a memory area.
* This includes setstatus, super-ioctls and no_pfn; the latter may move
* unmappable regions to mappable. It's a bug to leave kernel space with the
@@ -53,7 +53,7 @@
* latency. The locking functions will return -EAGAIN if interrupted by a
* signal.
*
- * Locking order: The lock should be taken BEFORE any kernel mutexes
+ * Locking order: The lock should be taken BEFORE any kernel mutexes
* or spinlocks.
*/
@@ -140,7 +140,7 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
/*
* Add a dummy user-object, the destructor of which will
- * make sure the lock is released if the client dies
+ * make sure the lock is released if the client dies
* while holding it.
*/
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 7c86c4aa..9ab28b03 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -1,8 +1,8 @@
/**************************************************************************
- *
+ *
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -10,7 +10,7 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
@@ -19,8 +19,8 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
@@ -90,13 +90,13 @@ EXPORT_SYMBOL(drm_bo_move_ttm);
*
* \param bo The buffer object.
* \return Failure indication.
- *
+ *
* Returns -EINVAL if the buffer object is currently not mappable.
* Returns -ENOMEM if the ioremap operation failed.
* Otherwise returns zero.
- *
+ *
* After a successfull call, bo->iomap contains the virtual address, or NULL
- * if the buffer object content is not accessible through PCI space.
+ * if the buffer object content is not accessible through PCI space.
* Call bo->mutex locked.
*/
@@ -349,11 +349,11 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
#ifdef DRM_ODD_MM_COMPAT
/*
* In this mode, we don't allow pipelining a copy blit,
- * since the buffer will be accessible from user space
+ * since the buffer will be accessible from user space
* the moment we return and rebuild the page tables.
*
* With normal vm operation, page tables are rebuilt
- * on demand using fault(), which waits for buffer idle.
+ * on demand using fault(), which waits for buffer idle.
*/
if (1)
#else
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index 60eca60c..65818c6f 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -53,7 +53,7 @@ struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
- ((entry->map->offset == map->offset) ||
+ ((entry->map->offset == map->offset) ||
(map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
return entry;
}
@@ -80,10 +80,10 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
int ret;
hash->key = user_token >> PAGE_SHIFT;
ret = drm_ht_insert_item(&dev->map_hash, hash);
- if (ret != -EINVAL)
+ if (ret != -EINVAL)
return ret;
}
- return drm_ht_just_insert_please(&dev->map_hash, hash,
+ return drm_ht_just_insert_please(&dev->map_hash, hash,
user_token, 32 - PAGE_SHIFT - 3,
0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
@@ -297,7 +297,7 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
/* Assign a 32-bit handle */
- user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
+ user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
map->offset;
ret = drm_map_handle(dev, &list->hash, user_token, 0);
@@ -379,7 +379,7 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
list_del(&r_list->head);
- drm_ht_remove_key(&dev->map_hash,
+ drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
found = 1;
@@ -821,9 +821,9 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)
page_count = 0;
while (entry->buf_count < count) {
-
+
dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
-
+
if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -1600,5 +1600,3 @@ int drm_order(unsigned long size)
return order;
}
EXPORT_SYMBOL(drm_order);
-
-
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index ae44e500..67c8c998 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -1,5 +1,5 @@
/**************************************************************************
- *
+ *
* This kernel module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
@@ -13,7 +13,7 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
+ *
**************************************************************************/
/*
* This code provides access to unexported mm kernel features. It is necessary
@@ -21,7 +21,7 @@
* directly.
*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- * Linux kernel mm subsystem authors.
+ * Linux kernel mm subsystem authors.
* (Most code taken from there).
*/
@@ -50,7 +50,7 @@ int drm_unmap_page_from_agp(struct page *page)
* performance reasons */
return i;
}
-#endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
@@ -80,22 +80,22 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
/*
* vm code for kernels below 2.6.15 in which version a major vm write
- * occured. This implement a simple straightforward
+ * occured. This implement a simple straightforward
* version similar to what's going to be
* in kernel 2.6.19+
* Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
* nopfn.
- */
+ */
static struct {
spinlock_t lock;
struct page *dummy_page;
atomic_t present;
-} drm_np_retry =
+} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
-static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data);
@@ -126,7 +126,7 @@ void free_nopage_retry(void)
}
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
+ unsigned long address,
int *type)
{
struct fault_data data;
@@ -204,14 +204,14 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
- struct drm_ttm *ttm;
+ struct drm_ttm *ttm;
struct drm_device *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
-
+
dev = bo->dev;
while(drm_bo_read_lock(&dev->bm.bm_lock));
@@ -219,12 +219,12 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
err = drm_bo_wait(bo, 0, 1, 0);
if (err) {
- data->type = (err == -EAGAIN) ?
+ data->type = (err == -EAGAIN) ?
VM_FAULT_MINOR : VM_FAULT_SIGBUS;
goto out_unlock;
}
-
-
+
+
/*
* If buffer happens to be in a non-mappable location,
* move it to a mappable.
@@ -253,7 +253,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
}
dev = bo->dev;
- err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
@@ -286,7 +286,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
err = vm_insert_pfn(vma, address, pfn);
if (!err || err == -EBUSY)
- data->type = VM_FAULT_MINOR;
+ data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out_unlock:
@@ -330,7 +330,7 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
* VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
* workaround for a single BUG statement in do_no_page in these versions. The
* tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
- * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
+ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
* check first take the dev->struct_mutex, and then trylock all mmap_sems. If this
* fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
* release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
@@ -351,13 +351,13 @@ typedef struct vma_entry {
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
+ unsigned long address,
int *type)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
- struct drm_ttm *ttm;
+ struct drm_ttm *ttm;
struct drm_device *dev;
mutex_lock(&bo->mutex);
@@ -369,7 +369,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
page = NOPAGE_SIGBUS;
goto out_unlock;
}
-
+
dev = bo->dev;
if (drm_mem_reg_is_pci(dev, &bo->mem)) {
@@ -403,8 +403,8 @@ int drm_bo_map_bound(struct vm_area_struct *vma)
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
-
- ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+
+ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
&bus_offset, &bus_size);
BUG_ON(ret);
@@ -419,7 +419,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma)
return ret;
}
-
+
int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
@@ -493,7 +493,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)
{
p_mm_entry_t *entry;
int lock_ok = 1;
-
+
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(entry->locked);
if (!down_write_trylock(&entry->mm->mmap_sem)) {
@@ -507,7 +507,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)
return 0;
list_for_each_entry(entry, &bo->p_mm_list, head) {
- if (!entry->locked)
+ if (!entry->locked)
break;
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
@@ -524,7 +524,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)
void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
{
p_mm_entry_t *entry;
-
+
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(!entry->locked);
up_write(&entry->mm->mmap_sem);
@@ -532,7 +532,7 @@ void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
}
}
-int drm_bo_remap_bound(struct drm_buffer_object *bo)
+int drm_bo_remap_bound(struct drm_buffer_object *bo)
{
vma_entry_t *v_entry;
int ret = 0;
@@ -553,9 +553,9 @@ void drm_bo_finish_unmap(struct drm_buffer_object *bo)
vma_entry_t *v_entry;
list_for_each_entry(v_entry, &bo->vma_list, head) {
- v_entry->vma->vm_flags &= ~VM_PFNMAP;
+ v_entry->vma->vm_flags &= ~VM_PFNMAP;
}
-}
+}
#endif
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index f74f4bc2..f8933e0c 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -89,7 +89,7 @@
#define __user
#endif
-#if !defined(__put_page)
+#if !defined(__put_page)
#define __put_page(p) atomic_dec(&(p)->count)
#endif
@@ -104,7 +104,7 @@
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
- return remap_page_range(vma, from,
+ return remap_page_range(vma, from,
pfn << PAGE_SHIFT,
size,
pgprot);
@@ -178,7 +178,7 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
/*
- * Flush relevant caches and clear a VMA structure so that page references
+ * Flush relevant caches and clear a VMA structure so that page references
* will cause a page fault. Don't flush tlbs.
*/
@@ -186,7 +186,7 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
unsigned long addr, unsigned long end);
/*
- * Return the PTE protection map entries for the VMA flags given by
+ * Return the PTE protection map entries for the VMA flags given by
* flags. This is a functional interface to the kernel's protection map.
*/
@@ -223,7 +223,7 @@ extern void free_nopage_retry(void);
#ifndef DRM_FULL_MM_COMPAT
/*
- * For now, just return a dummy page that we've allocated out of
+ * For now, just return a dummy page that we've allocated out of
* static space. The page will be put by do_nopage() since we've already
* filled out the pte.
*/
@@ -233,13 +233,13 @@ struct fault_data {
unsigned long address;
pgoff_t pgoff;
unsigned int flags;
-
+
int type;
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
+ unsigned long address,
int *type);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
!defined(DRM_FULL_MM_COMPAT)
@@ -254,22 +254,22 @@ struct drm_buffer_object;
/*
- * Add a vma to the ttm vma list, and the
+ * Add a vma to the ttm vma list, and the
* process mm pointer to the ttm mm list. Needs the ttm mutex.
*/
-extern int drm_bo_add_vma(struct drm_buffer_object * bo,
+extern int drm_bo_add_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Delete a vma and the corresponding mm pointer from the
* ttm lists. Needs the ttm mutex.
*/
-extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
+extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Attempts to lock all relevant mmap_sems for a ttm, while
- * not releasing the ttm mutex. May return -EAGAIN to avoid
+ * not releasing the ttm mutex. May return -EAGAIN to avoid
* deadlocks. In that case the caller shall release the ttm mutex,
* schedule() and try again.
*/
@@ -292,7 +292,7 @@ extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
/*
- * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
+ * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
* fault these pfns in, because the first one will set the vma VM_PFNMAP
* flag, which will make the next fault bug in do_nopage(). The function
* releases the mmap_sems for this ttm.
diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c
index 7854e89c..83ad291e 100644
--- a/linux-core/drm_context.c
+++ b/linux-core/drm_context.c
@@ -89,7 +89,7 @@ again:
mutex_unlock(&dev->struct_mutex);
goto again;
}
-
+
mutex_unlock(&dev->struct_mutex);
return new_id;
}
@@ -160,7 +160,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
- request->handle =
+ request->handle =
(void *)(unsigned long)_entry->user_token;
break;
}
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 47d17651..bba84143 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -123,13 +123,13 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
@@ -244,7 +244,7 @@ int drm_lastclose(struct drm_device * dev)
list_del(&vma->head);
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
-
+
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
drm_rmmap_locked(dev, r_list->map);
r_list = NULL;
@@ -324,7 +324,7 @@ int drm_init(struct drm_driver *driver,
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
pid->subdevice, pdev))) {
/* Are there device class requirements? */
- if ((pid->class != 0)
+ if ((pid->class != 0)
&& ((pdev->class & pid->class_mask) != pid->class)) {
continue;
}
@@ -355,7 +355,7 @@ int drm_init(struct drm_driver *driver,
pid->subvendor, pid->subdevice,
pdev))) {
/* Are there device class requirements? */
- if ((pid->class != 0)
+ if ((pid->class != 0)
&& ((pdev->class & pid->class_mask) != pid->class)) {
continue;
}
@@ -469,19 +469,19 @@ static int __init drm_core_init(void)
unsigned long max_memctl_mem;
si_meminfo(&si);
-
+
/*
* AGP only allows low / DMA32 memory ATM.
*/
avail_memctl_mem = si.totalram - si.totalhigh;
- /*
- * Avoid overflows
+ /*
+ * Avoid overflows
*/
max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
- max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
+ max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
if (avail_memctl_mem >= max_memctl_mem)
avail_memctl_mem = max_memctl_mem;
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index e696b42d..d09efeb4 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -127,7 +127,7 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
list_for_each_entry(fence, head, ring) {
if (&fence->ring == &fc->ring)
break;
- diff = (fc->last_exe_flush - fence->sequence) &
+ diff = (fc->last_exe_flush - fence->sequence) &
driver->sequence_mask;
if (diff > driver->wrap_diff)
break;
@@ -497,7 +497,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);
- /*
+ /*
* Avoid hitting BUG() for kernel-only fence objects.
*/
@@ -656,7 +656,7 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
-
+
/*
* usage > 0. No need to lock dev->struct_mutex;
*/
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 0ccaed5b..4d0d6be0 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -528,4 +528,3 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
return 0;
}
EXPORT_SYMBOL(drm_poll);
-
diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c
index a8ec8468..dacc83be 100644
--- a/linux-core/drm_hashtab.c
+++ b/linux-core/drm_hashtab.c
@@ -128,7 +128,7 @@ int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item)
}
/*
- * Just insert an item and return any "bits" bit key that hasn't been
+ * Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item,
diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h
index 0f137677..c090677b 100644
--- a/linux-core/drm_hashtab.h
+++ b/linux-core/drm_hashtab.h
@@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht);
#endif
-
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index cf0d92fa..59110293 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -235,12 +235,12 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
- if (entry->size < size)
+ if (entry->size < size)
continue;
if (alignment) {
register unsigned tmp = entry->start % alignment;
- if (tmp)
+ if (tmp)
wasted += alignment - tmp;
}
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 0a05e5fe..c9c1fdb8 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -628,7 +628,7 @@ extern void drm_regs_init(struct drm_reg_manager *manager,
void (*reg_destroy)(struct drm_reg *));
/*
- * drm_bo_lock.c
+ * drm_bo_lock.c
* Simple replacement for the hardware lock on buffer manager init and clean.
*/
@@ -636,10 +636,10 @@ extern void drm_regs_init(struct drm_reg_manager *manager,
extern void drm_bo_init_lock(struct drm_bo_lock *lock);
extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
extern int drm_bo_read_lock(struct drm_bo_lock *lock);
-extern int drm_bo_write_lock(struct drm_bo_lock *lock,
+extern int drm_bo_write_lock(struct drm_bo_lock *lock,
struct drm_file *file_priv);
-extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
+extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
struct drm_file *file_priv);
#ifdef CONFIG_DEBUG_MUTEXES
diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h
index 84c294e1..8921944e 100644
--- a/linux-core/drm_os_linux.h
+++ b/linux-core/drm_os_linux.h
@@ -92,9 +92,9 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
copy_to_user(arg1, arg2, arg3)
/* Macros for copyfrom user, but checking readability only once */
-#define DRM_VERIFYAREA_READ( uaddr, size ) \
+#define DRM_VERIFYAREA_READ( uaddr, size ) \
(access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT)
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_from_user(arg1, arg2, arg3)
#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_to_user(arg1, arg2, arg3)
@@ -143,4 +143,3 @@ do { \
#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags);
#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
#define DRM_SPINLOCK_ASSERT(l) do {} while (0)
-
diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c
index 3c0f672e..920b11c8 100644
--- a/linux-core/drm_scatter.c
+++ b/linux-core/drm_scatter.c
@@ -126,7 +126,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual);
- for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+ for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
entry->pagelist[j] = vmalloc_to_page((void *)i);
if (!entry->pagelist[j])
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index dabd174b..401704f7 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -75,7 +75,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
mutex_init(&dev->bm.evict_mutex);
idr_init(&dev->drw_idr);
-
+
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
diff --git a/linux-core/ffb_drv.h b/linux-core/ffb_drv.h
index bad3c94d..f961ba47 100644
--- a/linux-core/ffb_drv.h
+++ b/linux-core/ffb_drv.h
@@ -124,7 +124,7 @@ typedef struct _ffb_fbc {
/*294*/ volatile unsigned int xpmask; /* X PlaneMask */
/*298*/ volatile unsigned int ypmask; /* Y PlaneMask */
/*29c*/ volatile unsigned int zpmask; /* Z PlaneMask */
-/*2a0*/ ffb_auxclip auxclip[4]; /* Auxilliary Viewport Clip */
+/*2a0*/ ffb_auxclip auxclip[4]; /* Auxilliary Viewport Clip */
/* New 3dRAM III support regs */
/*2c0*/ volatile unsigned int rawblend2;
@@ -266,7 +266,7 @@ typedef struct ffb_dev_priv {
int prom_node;
enum ffb_chip_type ffb_type;
u64 card_phys_base;
- struct miscdevice miscdev;
+ struct miscdevice miscdev;
/* Controller registers. */
ffb_fbcPtr regs;
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index 7c37b4bb..949355aa 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -41,7 +41,7 @@
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
-#define I810_BUF_HARDWARE 0
+#define I810_BUF_HARDWARE 0
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1
@@ -867,7 +867,7 @@ static void i810_dma_quiescent(struct drm_device * dev)
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
+/* printk("%s\n", __FUNCTION__); */
i810_kernel_lost_context(dev);
@@ -888,7 +888,7 @@ static int i810_flush_queue(struct drm_device * dev)
int i, ret = 0;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
+/* printk("%s\n", __FUNCTION__); */
i810_kernel_lost_context(dev);
diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h
index c525e165..86278e3d 100644
--- a/linux-core/i810_drv.h
+++ b/linux-core/i810_drv.h
@@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
*
*/
@@ -134,7 +134,7 @@ extern int i810_max_ioctl;
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)
-#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
+#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
#define I810_READ16(reg) I810_DEREF16(reg)
#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
@@ -155,19 +155,19 @@ extern int i810_max_ioctl;
} while (0)
#define ADVANCE_LP_RING() do { \
- if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
+ if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
dev_priv->ring.tail = outring; \
I810_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
-#define OUT_RING(n) do { \
+#define OUT_RING(n) do { \
if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
outring += 4; \
outring &= ringmask; \
} while (0)
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@@ -184,28 +184,28 @@ extern int i810_max_ioctl;
#define I810REG_HWSTAM 0x02098
#define I810REG_INT_IDENTITY_R 0x020a4
-#define I810REG_INT_MASK_R 0x020a8
+#define I810REG_INT_MASK_R 0x020a8
#define I810REG_INT_ENABLE_R 0x020a0
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+#define RING_TAIL 0x00
#define TAIL_ADDR 0x000FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x00FFFFF8
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x000FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0x00FFFFF8
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x000FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index bbc7e1db..1f88a513 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -1,8 +1,8 @@
/**************************************************************************
- *
+ *
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -10,20 +10,20 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
- *
+ *
+ *
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@@ -183,7 +183,7 @@ static int i915_move_blit(struct drm_buffer_object * bo,
}
/*
- * Flip destination ttm into cached-coherent AGP,
+ * Flip destination ttm into cached-coherent AGP,
* then blit and subsequently move out again.
*/
@@ -258,7 +258,7 @@ static inline void clflush(volatile void *__p)
#endif
static inline void drm_cache_flush_addr(void *virt)
-{
+{
int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
diff --git a/linux-core/i915_compat.c b/linux-core/i915_compat.c
index 969d5977..3a437a1c 100644
--- a/linux-core/i915_compat.c
+++ b/linux-core/i915_compat.c
@@ -91,7 +91,7 @@ static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
pci_write_config_dword(pdev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
} else {
u64 l64;
-
+
temp_lo &= ~0x1;
l64 = ((u64)temp_hi << 32) | temp_lo;
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 124db68f..798491ae 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -1,10 +1,10 @@
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
*/
/*
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -12,11 +12,11 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
@@ -24,7 +24,7 @@
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
*/
#include "drmP.h"
@@ -420,7 +420,7 @@ static int i915_resume(struct drm_device *dev)
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
-
+
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
@@ -451,7 +451,7 @@ static int i915_resume(struct drm_device *dev)
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
udelay(150);
-
+
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index a0f22785..f2c29828 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -1,8 +1,8 @@
/**************************************************************************
- *
+ *
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -10,20 +10,20 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
- *
+ *
+ *
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@@ -70,7 +70,7 @@ static void i915_perform_flush(struct drm_device * dev)
if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
i915_user_irq_off(dev_priv);
dev_priv->fence_irq_on = 0;
- } else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
+ } else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
i915_user_irq_on(dev_priv);
dev_priv->fence_irq_on = 1;
}
diff --git a/linux-core/i915_ioc32.c b/linux-core/i915_ioc32.c
index c1e776b7..7e733d33 100644
--- a/linux-core/i915_ioc32.c
+++ b/linux-core/i915_ioc32.c
@@ -3,7 +3,7 @@
*
* 32-bit ioctl compatibility routines for the i915 DRM.
*
- * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
*
*
* Copyright (C) Paul Mackerras 2005
@@ -49,11 +49,11 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
{
drm_i915_batchbuffer32_t batchbuffer32;
drm_i915_batchbuffer_t __user *batchbuffer;
-
+
if (copy_from_user
(&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
return -EFAULT;
-
+
batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
|| __put_user(batchbuffer32.start, &batchbuffer->start)
@@ -65,7 +65,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
|| __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
&batchbuffer->cliprects))
return -EFAULT;
-
+
return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_I915_BATCHBUFFER,
(unsigned long) batchbuffer);
@@ -85,11 +85,11 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
{
drm_i915_cmdbuffer32_t cmdbuffer32;
drm_i915_cmdbuffer_t __user *cmdbuffer;
-
+
if (copy_from_user
(&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
return -EFAULT;
-
+
cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
|| __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
@@ -101,7 +101,7 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
|| __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
&cmdbuffer->cliprects))
return -EFAULT;
-
+
return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);
}
@@ -208,7 +208,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
-
+
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c
index 1eb6d9e6..e3d26a08 100644
--- a/linux-core/mga_drv.c
+++ b/linux-core/mga_drv.c
@@ -140,7 +140,7 @@ static int mga_driver_device_is_agp(struct drm_device * dev)
* device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
* device.
*/
-
+
if ((pdev->device == 0x0525) && pdev->bus->self
&& (pdev->bus->self->vendor == 0x3388)
&& (pdev->bus->self->device == 0x0021) ) {
diff --git a/linux-core/mga_ioc32.c b/linux-core/mga_ioc32.c
index 75f2a231..e3df567e 100644
--- a/linux-core/mga_ioc32.c
+++ b/linux-core/mga_ioc32.c
@@ -39,17 +39,17 @@
typedef struct drm32_mga_init {
int func;
- u32 sarea_priv_offset;
+ u32 sarea_priv_offset;
int chipset;
- int sgram;
+ int sgram;
unsigned int maccess;
- unsigned int fb_cpp;
+ unsigned int fb_cpp;
unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
u32 fb_offset;
u32 mmio_offset;
u32 status_offset;
@@ -64,10 +64,10 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
drm_mga_init32_t init32;
drm_mga_init_t __user *init;
int err = 0, i;
-
+
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
return -EFAULT;
-
+
init = compat_alloc_user_space(sizeof(*init));
if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
|| __put_user(init32.func, &init->func)
@@ -90,7 +90,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
|| __put_user(init32.primary_offset, &init->primary_offset)
|| __put_user(init32.buffers_offset, &init->buffers_offset))
return -EFAULT;
-
+
for (i=0; i<MGA_NR_TEX_HEAPS; i++)
{
err |= __put_user(init32.texture_offset[i], &init->texture_offset[i]);
@@ -98,7 +98,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
}
if (err)
return -EFAULT;
-
+
return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_MGA_INIT, (unsigned long) init);
}
@@ -115,7 +115,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
{
drm_mga_getparam32_t getparam32;
drm_mga_getparam_t __user *getparam;
-
+
if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
return -EFAULT;
@@ -125,7 +125,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
|| __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value))
return -EFAULT;
- return drm_ioctl(file->f_dentry->d_inode, file,
+ return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
}
@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
return -EFAULT;
if (copy_to_user((void __user *)arg, &dma_bootstrap32,
- sizeof(dma_bootstrap32)))
+ sizeof(dma_bootstrap32)))
return -EFAULT;
return 0;
@@ -219,7 +219,7 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd,
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
-
+
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c
index 80ba1759..c40dff6b 100644
--- a/linux-core/nouveau_buffer.c
+++ b/linux-core/nouveau_buffer.c
@@ -95,7 +95,7 @@ nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
man->io_size = nouveau_mem_fb_amount(dev);
break;
case DRM_BO_MEM_PRIV0:
- /* Unmappable VRAM */
+ /* Unmappable VRAM */
man->flags = _DRM_FLAG_MEMTYPE_CMA;
man->drm_bus_maptype = 0;
break;
@@ -254,4 +254,3 @@ struct drm_bo_driver nouveau_bo_driver = {
.move = nouveau_bo_move,
.ttm_cache_flush= nouveau_bo_flush_ttm
};
-
diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c
index ac70d7ae..e9623eb1 100644
--- a/linux-core/nouveau_drv.c
+++ b/linux-core/nouveau_drv.c
@@ -80,7 +80,7 @@ static struct drm_driver driver = {
.probe = probe,
.remove = __devexit_p(drm_cleanup_pci),
},
-
+
.bo_driver = &nouveau_bo_driver,
.fence_driver = &nouveau_fence_driver,
diff --git a/linux-core/nouveau_fence.c b/linux-core/nouveau_fence.c
index 6f3259f0..b3e81a89 100644
--- a/linux-core/nouveau_fence.c
+++ b/linux-core/nouveau_fence.c
@@ -131,4 +131,3 @@ struct drm_fence_driver nouveau_fence_driver = {
.emit = nouveau_fence_emit,
.poke_flush = nouveau_fence_poke_flush
};
-
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
index b86c5d7c..f3bf5341 100644
--- a/linux-core/nouveau_sgdma.c
+++ b/linux-core/nouveau_sgdma.c
@@ -128,7 +128,7 @@ nouveau_sgdma_unbind(struct drm_ttm_backend *be)
if (nvbe->is_bound) {
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
unsigned int pte;
-
+
pte = nvbe->pte_start;
while (pte < (nvbe->pte_start + nvbe->pages)) {
uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;
@@ -336,4 +336,3 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
DRM_ERROR("Unimplemented on NV50\n");
return -EINVAL;
}
-
diff --git a/linux-core/r128_ioc32.c b/linux-core/r128_ioc32.c
index 6b757576..64b16798 100644
--- a/linux-core/r128_ioc32.c
+++ b/linux-core/r128_ioc32.c
@@ -64,10 +64,10 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
{
drm_r128_init32_t init32;
drm_r128_init_t __user *init;
-
+
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
return -EFAULT;
-
+
init = compat_alloc_user_space(sizeof(*init));
if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
|| __put_user(init32.func, &init->func)
@@ -94,7 +94,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
|| __put_user(init32.agp_textures_offset,
&init->agp_textures_offset))
return -EFAULT;
-
+
return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_R128_INIT, (unsigned long)init);
}
diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c
index 327a6a97..d847f3cd 100644
--- a/linux-core/radeon_drv.c
+++ b/linux-core/radeon_drv.c
@@ -49,7 +49,7 @@ static int dri_library_name(struct drm_device * dev, char * buf)
return snprintf(buf, PAGE_SIZE, "%s\n",
(family < CHIP_R200) ? "radeon" :
((family < CHIP_R300) ? "r200" :
- "r300"));
+ "r300"));
}
static struct pci_device_id pciidlist[] = {
diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c
index 9222b08d..2225ec80 100644
--- a/linux-core/sis_mm.c
+++ b/linux-core/sis_mm.c
@@ -249,7 +249,7 @@ sis_idle(struct drm_device *dev)
return 0;
}
}
-
+
/*
* Implement a device switch here if needed
*/
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
index a6c59832..ea755247 100644
--- a/linux-core/via_buffer.c
+++ b/linux-core/via_buffer.c
@@ -94,9 +94,9 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,
man->drm_bus_maptype = 0;
break;
- case DRM_BO_MEM_TT:
+ case DRM_BO_MEM_TT:
/* Dynamic agpgart memory */
-
+
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
@@ -109,21 +109,21 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,
/* Only to get pte protection right. */
- man->drm_bus_maptype = _DRM_AGP;
+ man->drm_bus_maptype = _DRM_AGP;
break;
- case DRM_BO_MEM_VRAM:
+ case DRM_BO_MEM_VRAM:
/* "On-card" video ram */
-
+
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_FRAME_BUFFER;
man->io_addr = NULL;
return via_vram_info(dev, &man->io_offset, &man->io_size);
break;
- case DRM_BO_MEM_PRIV0:
+ case DRM_BO_MEM_PRIV0:
/* Pre-bound agpgart memory */
-
+
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c
index d44c26f4..e285ca7b 100644
--- a/linux-core/via_dmablit.c
+++ b/linux-core/via_dmablit.c
@@ -1,5 +1,5 @@
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
+ *
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -16,22 +16,22 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
+ * Authors:
* Thomas Hellstrom.
* Partially based on code obtained from Digeo Inc.
*/
/*
- * Unmaps the DMA mappings.
- * FIXME: Is this a NoOp on x86? Also
- * FIXME: What happens if this one is called and a pending blit has previously done
- * the same DMA mappings?
+ * Unmaps the DMA mappings.
+ * FIXME: Is this a NoOp on x86? Also
+ * FIXME: What happens if this one is called and a pending blit has previously done
+ * the same DMA mappings?
*/
#include "drmP.h"
@@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
int num_desc = vsg->num_desc;
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
- drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
dma_addr_t next = vsg->chain_start;
@@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (descriptor_this_page-- == 0) {
cur_descriptor_page--;
descriptor_this_page = vsg->descriptors_per_page - 1;
- desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
}
dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
@@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
static void
via_map_blit_for_device(struct pci_dev *pdev,
const drm_via_dmablit_t *xfer,
- drm_via_sg_info_t *vsg,
+ drm_via_sg_info_t *vsg,
int mode)
{
unsigned cur_descriptor_page = 0;
@@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
drm_via_descriptor_t *desc_ptr = NULL;
- if (mode == 1)
+ if (mode == 1)
desc_ptr = vsg->desc_pages[cur_descriptor_page];
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
@@ -118,7 +118,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
line_len = xfer->line_length;
cur_fb = fb_addr;
cur_mem = mem_addr;
-
+
while (line_len > 0) {
remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
@@ -131,10 +131,10 @@ via_map_blit_for_device(struct pci_dev *pdev,
VIA_PGOFF(cur_mem), remaining_len,
vsg->direction);
desc_ptr->dev_addr = cur_fb;
-
+
desc_ptr->size = remaining_len;
desc_ptr->next = (uint32_t) next;
- next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
+ next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
DMA_TO_DEVICE);
desc_ptr++;
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
@@ -142,12 +142,12 @@ via_map_blit_for_device(struct pci_dev *pdev,
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
}
}
-
+
num_desc++;
cur_mem += remaining_len;
cur_fb += remaining_len;
}
-
+
mem_addr += xfer->mem_stride;
fb_addr += xfer->fb_stride;
}
@@ -160,14 +160,14 @@ via_map_blit_for_device(struct pci_dev *pdev,
}
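via_map_blit_for_device is deliberately two-pass: with mode == 0 it only counts descriptors into vsg->num_desc, with mode == 1 it builds and DMA-maps the chain. A sketch of the calling sequence, as via_build_sg_info (later in this file) uses it, with error handling trimmed:

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);  /* pass 1: count descriptors */
	if ((ret = via_alloc_desc_pages(vsg)) != 0)        /* size desc_pages from the count */
		return ret;
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);  /* pass 2: fill and DMA-map */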
/*
- * Function that frees up all resources for a blit. It is usable even if the
+ * Function that frees up all resources for a blit. It is usable even if the
* blit info has only been partially built as long as the status enum is consistent
* with the actual status of the used resources.
*/
static void
-via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
struct page *page;
int i;
@@ -184,7 +184,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
case dr_via_pages_locked:
for (i=0; i<vsg->num_pages; ++i) {
if ( NULL != (page = vsg->pages[i])) {
- if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+ if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
page_cache_release(page);
}
@@ -199,7 +199,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
vsg->bounce_buffer = NULL;
}
vsg->free_on_sequence = 0;
-}
+}
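The teardown above relies on ordered switch fallthrough: vsg->state records how far setup got, and each case releases one stage's resources before falling into the next, so a partially built blit is cleaned up exactly as far as it was built. A self-contained sketch of the pattern (hypothetical state names; libc free() standing in for the kernel allocators):

	#include <stdlib.h>

	enum setup_state { ST_NONE, ST_PAGES_ALLOC, ST_PAGES_LOCKED, ST_DESC_ALLOC };

	struct sg_sketch {
		enum setup_state state;
		void **desc_pages;          /* one entry per descriptor page */
		int num_desc_pages;
		void *pages;                /* array of user-page pointers */
	};

	static void free_sg_sketch(struct sg_sketch *sg)
	{
		int i;

		switch (sg->state) {
		case ST_DESC_ALLOC:                 /* furthest stage reached */
			for (i = 0; i < sg->num_desc_pages; ++i)
				free(sg->desc_pages[i]);    /* __get_free_page() in the driver */
			free(sg->desc_pages);
			/* fall through */
		case ST_PAGES_LOCKED:
			/* the driver dirties and releases the locked user pages here */
			/* fall through */
		case ST_PAGES_ALLOC:
			free(sg->pages);                /* vfree() in the driver */
			/* fall through */
		default:
			sg->state = ST_NONE;
		}
	}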
/*
* Fire a blit engine.
@@ -212,7 +212,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
VIA_DMA_CSR_DE);
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
@@ -232,20 +232,20 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int ret;
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
- vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
+ vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
first_pfn + 1;
-
+
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
return -ENOMEM;
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
- vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE),
+ vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE),
0, vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
- if (ret < 0)
+ if (ret < 0)
return ret;
vsg->state = dr_via_pages_locked;
return -EINVAL;
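The page count above is plain pfn arithmetic: convert the first and last byte of the transfer to page frame numbers and take the inclusive span. A worked example, assuming PAGE_SIZE is 4096 so that VIA_PFN(x) is x >> 12:

	/*
	 * xfer->mem_addr = 0x10ff0, num_lines = 2, mem_stride = 0x20:
	 *   first_pfn          = 0x10ff0 >> 12         = 0x10
	 *   last byte of xfer  = 0x10ff0 + 2*0x20 - 1  = 0x1102f -> pfn 0x11
	 *   vsg->num_pages     = 0x11 - 0x10 + 1       = 2
	 * the 0x40-byte transfer straddles a page boundary, so two user
	 * pages must be locked even though it is far smaller than a page.
	 */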
@@ -261,22 +261,22 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 * quite large for some blits, and pages don't need to be contiguous.
*/
-static int
+static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
int i;
-
+
vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
- vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+ vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
- if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
+ if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
return -ENOMEM;
-
+
memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
vsg->state = dr_via_desc_pages_alloc;
for (i=0; i<vsg->num_desc_pages; ++i) {
- if (NULL == (vsg->desc_pages[i] =
+ if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
return -ENOMEM;
}
@@ -284,7 +284,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
vsg->num_desc);
return 0;
}
-
+
static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
@@ -298,7 +298,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
@@ -309,7 +309,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
* the workqueue task takes care of processing associated with the old blit.
*/
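Summarized as a sketch, the division of labor the comment describes, matching the handler and workqueue code below:

	/*
	 *   irq/timer context (must not sleep):
	 *     - read the engine CSR, detect a finished or aborted transfer
	 *     - wake sync waiters on blit_queue[cur], advance cur
	 *     - fire the next queued blit, or turn the engine off
	 *     - schedule_work(&blitq->wq) to defer the cleanup
	 *   workqueue context (may sleep):
	 *     - for each retired blit: unmap DMA, release user pages,
	 *       kfree the sg info, wake slot waiters on busy_queue
	 */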
-
+
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
@@ -329,19 +329,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
- done_transfer = blitq->is_active &&
+ done_transfer = blitq->is_active &&
(( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
- done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
+ done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
if (done_transfer) {
blitq->blits[cur]->aborted = blitq->aborting;
blitq->done_blit_handle++;
- DRM_WAKEUP(blitq->blit_queue + cur);
+ DRM_WAKEUP(blitq->blit_queue + cur);
cur++;
- if (cur >= VIA_NUM_BLIT_SLOTS)
+ if (cur >= VIA_NUM_BLIT_SLOTS)
cur = 0;
blitq->cur = cur;
@@ -353,7 +353,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->is_active = 0;
blitq->aborting = 0;
- schedule_work(&blitq->wq);
+ schedule_work(&blitq->wq);
} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
@@ -365,7 +365,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->aborting = 1;
blitq->end = jiffies + DRM_HZ;
}
-
+
if (!blitq->is_active) {
if (blitq->num_outstanding) {
via_fire_dmablit(dev, blitq->blits[cur], engine);
@@ -383,14 +383,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
}
via_dmablit_engine_off(dev, engine);
}
- }
+ }
if (from_irq) {
spin_unlock(&blitq->blit_lock);
} else {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
-}
+}
@@ -426,13 +426,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
return active;
}
-
+
/*
* Sync. Wait for at least three seconds for the blit to be performed.
*/
static int
-via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -441,12 +441,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
- DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
handle, engine, ret);
-
+
return ret;
}
@@ -468,12 +468,12 @@ via_dmablit_timer(unsigned long data)
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
-
- DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
+
+ DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
(unsigned long) jiffies);
via_dmablit_handler(dev, engine, 0);
-
+
if (!timer_pending(&blitq->poll_timer)) {
blitq->poll_timer.expires = jiffies+1;
add_timer(&blitq->poll_timer);
@@ -497,7 +497,7 @@ via_dmablit_timer(unsigned long data)
*/
-static void
+static void
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
via_dmablit_workqueue(void *data)
#else
@@ -513,38 +513,38 @@ via_dmablit_workqueue(struct work_struct *work)
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
int cur_released;
-
-
- DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
+
+
+ DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
+
while(blitq->serviced != blitq->cur) {
cur_released = blitq->serviced++;
DRM_DEBUG("Releasing blit slot %d\n", cur_released);
- if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
+ if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
blitq->serviced = 0;
-
+
cur_sg = blitq->blits[cur_released];
blitq->num_free++;
-
+
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
+
DRM_WAKEUP(&blitq->busy_queue);
-
+
via_free_sg_info(dev->pdev, cur_sg);
kfree(cur_sg);
-
+
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
-
+
/*
 * Init all blit engines. Currently we use two, but some hardware has 4.
@@ -558,8 +558,8 @@ via_init_dmablit(struct drm_device *dev)
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq;
- pci_set_master(dev->pdev);
-
+ pci_set_master(dev->pdev);
+
for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
blitq->dev = dev;
@@ -585,20 +585,20 @@ via_init_dmablit(struct drm_device *dev)
init_timer(&blitq->poll_timer);
blitq->poll_timer.function = &via_dmablit_timer;
blitq->poll_timer.data = (unsigned long) blitq;
- }
+ }
}
/*
* Build all info and do all mappings required for a blit.
*/
-
+
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int draw = xfer->to_fb;
int ret = 0;
-
+
vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
vsg->bounce_buffer = NULL;
@@ -612,7 +612,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
/*
* Below check is a driver limitation, not a hardware one. We
 * don't want to lock unused pages, and don't want to incorporate the
- * extra logic of avoiding them. Make sure there are no.
+ * extra logic of avoiding them. Make sure there are none.
* (Not a big limitation anyway.)
*/
@@ -638,11 +638,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
DRM_ERROR("Too large PCI DMA bitblt.\n");
return -EINVAL;
- }
+ }
- /*
+ /*
* we allow a negative fb stride to allow flipping of images in
- * transfer.
+ * transfer.
*/
if (xfer->mem_stride < xfer->line_length ||
@@ -668,7 +668,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return -EINVAL;
- }
+ }
#endif
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
@@ -684,17 +684,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
return ret;
}
via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
-
+
return 0;
}
-
+
/*
* Reserve one free slot in the blit queue. Will wait for one second for one
* to become available. Otherwise -EBUSY is returned.
*/
-static int
+static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
int ret=0;
@@ -709,10 +709,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
if (ret) {
return (-EINTR == ret) ? -EAGAIN : ret;
}
-
+
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
-
+
blitq->num_free--;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
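Spelled out with generic kernel primitives instead of the DRM_WAIT_ON wrapper, the reservation logic amounts to the sketch below (simplified; the driver additionally translates -EINTR into -EAGAIN so the ioctl can be reissued):

	#include <linux/jiffies.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	/* Minimal stand-in for the drm_via_blitq_t fields used here. */
	struct blitq_sketch {
		spinlock_t blit_lock;
		wait_queue_head_t busy_queue;
		unsigned num_free;
	};

	static int grab_slot_sketch(struct blitq_sketch *blitq)
	{
		unsigned long flags;
		long ret;

		spin_lock_irqsave(&blitq->blit_lock, flags);
		while (blitq->num_free == 0) {
			spin_unlock_irqrestore(&blitq->blit_lock, flags);
			/* sleep until a slot is released, for at most one second */
			ret = wait_event_interruptible_timeout(blitq->busy_queue,
							       blitq->num_free > 0, HZ);
			if (ret == 0)
				return -EBUSY;      /* timed out */
			if (ret < 0)
				return -EAGAIN;     /* signal: caller should retry */
			spin_lock_irqsave(&blitq->blit_lock, flags);
		}
		blitq->num_free--;
		spin_unlock_irqrestore(&blitq->blit_lock, flags);
		return 0;
	}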
@@ -723,7 +723,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
* Hand back a free slot if we changed our mind.
*/
-static void
+static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
unsigned long irqsave;
@@ -739,8 +739,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
*/
-static int
-via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
+static int
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_sg_info_t *vsg;
@@ -771,15 +771,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->blits[blitq->head++] = vsg;
- if (blitq->head >= VIA_NUM_BLIT_SLOTS)
+ if (blitq->head >= VIA_NUM_BLIT_SLOTS)
blitq->head = 0;
blitq->num_outstanding++;
- xfer->sync.sync_handle = ++blitq->cur_blit_handle;
+ xfer->sync.sync_handle = ++blitq->cur_blit_handle;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
xfer->sync.engine = engine;
- via_dmablit_handler(dev, engine, 0);
+ via_dmablit_handler(dev, engine, 0);
return 0;
}
@@ -787,7 +787,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
/*
 * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
 * that there is a high probability that this IOCTL will be interrupted by a signal. In that
- * case it returns with -EAGAIN for the signal to be delivered.
+ * case it returns with -EAGAIN for the signal to be delivered.
* The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
*/
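From userspace, that convention is a retry loop around the ioctl. A hypothetical libdrm-style wrapper (assuming the DRM_VIA_BLIT_SYNC command index and the drm_via_blitsync_t layout from via_drm.h):

	#include <xf86drm.h>
	#include "via_drm.h"

	/* Wait for a queued blit, reissuing the ioctl when a signal interrupts it. */
	static int blit_sync_sketch(int fd, drm_via_blitsync_t *sync)
	{
		int ret;

		do {
			ret = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC,
					      sync, sizeof(*sync));
		} while (ret == -EAGAIN);   /* interrupted by a signal: reissue */

		return ret;
	}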
@@ -797,7 +797,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
drm_via_blitsync_t *sync = data;
int err;
- if (sync->engine >= VIA_NUM_BLIT_ENGINES)
+ if (sync->engine >= VIA_NUM_BLIT_ENGINES)
return -EINVAL;
err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
@@ -807,15 +807,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
return err;
}
-
+
/*
* Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
- * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
* be reissued. See the above IOCTL code.
*/
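The submit side follows the same retry convention, and on success xfer->sync carries back the engine and handle to wait on. A hypothetical caller combining the two ioctls (reusing blit_sync_sketch from the sketch above; DRM_VIA_DMA_BLIT assumed from via_drm.h):

	/* Queue a blit, retrying while the queue is full, then wait for it. */
	static int blit_and_wait_sketch(int fd, drm_via_dmablit_t *xfer)
	{
		int ret;

		do {
			ret = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT,
						  xfer, sizeof(*xfer));
		} while (ret == -EAGAIN);   /* no free slot yet: reissue */

		if (ret)
			return ret;

		return blit_sync_sketch(fd, &xfer->sync);
	}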
-int
+int
via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
drm_via_dmablit_t *xfer = data;
diff --git a/linux-core/via_dmablit.h b/linux-core/via_dmablit.h
index 726ad25d..9b662a32 100644
--- a/linux-core/via_dmablit.h
+++ b/linux-core/via_dmablit.h
@@ -1,5 +1,5 @@
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
+ *
* Copyright 2005 Thomas Hellstrom.
* All Rights Reserved.
*
@@ -17,12 +17,12 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
+ * Authors:
* Thomas Hellstrom.
* Register info from Digeo Inc.
*/
@@ -67,7 +67,7 @@ typedef struct _drm_via_blitq {
unsigned cur;
unsigned num_free;
unsigned num_outstanding;
- unsigned long end;
+ unsigned long end;
int aborting;
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
@@ -77,46 +77,46 @@ typedef struct _drm_via_blitq {
struct work_struct wq;
struct timer_list poll_timer;
} drm_via_blitq_t;
-
-/*
+
+/*
* PCI DMA Registers
* Channels 2 & 3 don't seem to be implemented in hardware.
*/
-
-#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
-#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
-#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
-#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
-
-#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
-#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
-#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
-#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
-
-#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
-#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
-#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
-#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
-
-#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
-#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
-#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
-#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
-
-#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
-#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
-#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
-#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
-
-#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
-#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
-#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
-#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
-
-#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
-
-/* Define for DMA engine */
+
+#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
+
+/* Define for DMA engine */
/* DPR */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
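The channel register blocks sit at fixed strides, which is what lets via_dmablit.c compute an engine's registers as base + engine * stride:

	/*
	 * MAR/DAR/BCR/DPR repeat every 0x10 bytes per channel:
	 *   VIA_PCI_DMA_MAR1 == VIA_PCI_DMA_MAR0 + 1*0x10   (0xE50)
	 * MR and CSR are packed every 0x04 bytes:
	 *   VIA_PCI_DMA_CSR1 == VIA_PCI_DMA_CSR0 + 1*0x04   (0xE94)
	 * hence VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, ...) and
	 * VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, ...) in via_dmablit.c.
	 */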
diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c
index 35ca6bfc..270eac15 100644
--- a/linux-core/via_mm.c
+++ b/linux-core/via_mm.c
@@ -115,7 +115,7 @@ void via_lastclose(struct drm_device *dev)
dev_priv->vram_initialized = 0;
dev_priv->agp_initialized = 0;
mutex_unlock(&dev->struct_mutex);
-}
+}
int via_mem_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c
index d7b23c89..64401ae5 100644
--- a/linux-core/xgi_cmdlist.c
+++ b/linux-core/xgi_cmdlist.c
@@ -78,7 +78,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
* @type: Type of the current batch
*
* See section 3.2.2 "Begin" (page 15) of the 3D SPG.
- *
+ *
 * This function assumes that @type is in the range [0,3].
*/
unsigned int get_batch_command(enum xgi_batch_type type)
@@ -86,7 +86,7 @@ unsigned int get_batch_command(enum xgi_batch_type type)
static const unsigned int ports[4] = {
0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
};
-
+
return ports[type];
}
@@ -159,7 +159,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
2 - fb
3 - logout
*/
-int xgi_state_change(struct xgi_info * info, unsigned int to,
+int xgi_state_change(struct xgi_info * info, unsigned int to,
unsigned int from)
{
#define STATE_CONSOLE 0
@@ -219,7 +219,7 @@ void xgi_cmdlist_cleanup(struct xgi_info * info)
}
xgi_waitfor_pci_idle(info);
-
+
(void) memset(&info->cmdring, 0, sizeof(info->cmdring));
}
}
@@ -243,7 +243,7 @@ static void triggerHWCommandList(struct xgi_info * info)
void xgi_emit_flush(struct xgi_info * info, bool stop)
{
const u32 flush_command[8] = {
- ((0x10 << 24)
+ ((0x10 << 24)
| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),
BEGIN_LINK_ENABLE_MASK | (0x00004),
0x00000000, 0x00000000,
@@ -266,9 +266,9 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
info->cmdring.ring_offset = 0;
}
- hw_addr = info->cmdring.ring_hw_base
+ hw_addr = info->cmdring.ring_hw_base
+ info->cmdring.ring_offset;
- batch_addr = info->cmdring.ptr
+ batch_addr = info->cmdring.ptr
+ (info->cmdring.ring_offset / 4);
for (i = 0; i < (flush_size / 4); i++) {
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c
index 4e66197e..4f0b4ed0 100644
--- a/linux-core/xgi_drv.c
+++ b/linux-core/xgi_drv.c
@@ -352,7 +352,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *) arg;
struct xgi_info *info = dev->dev_private;
const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
- (0x2800
+ (0x2800
+ M2REG_AUTO_LINK_STATUS_ADDRESS)))
& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
| M2REG_ACTIVE_INTERRUPT_0_MASK
@@ -361,7 +361,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
if (irq_bits != 0) {
- DRM_WRITE32(info->mmio_map,
+ DRM_WRITE32(info->mmio_map,
0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
xgi_fence_handler(dev);
@@ -413,7 +413,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
-
+
fail:
drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
return err;
diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c
index 2e2d0094..3f50fe8f 100644
--- a/linux-core/xgi_fb.c
+++ b/linux-core/xgi_fb.c
@@ -32,7 +32,7 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
struct drm_file * filp)
{
struct drm_memblock_item *block;
- const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
+ const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
? "on-card" : "GART";
@@ -43,7 +43,7 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
return -EINVAL;
}
- if ((alloc->location == XGI_MEMLOC_LOCAL)
+ if ((alloc->location == XGI_MEMLOC_LOCAL)
? !info->fb_heap_initialized : !info->pcie_heap_initialized) {
DRM_ERROR("Attempt to allocate from uninitialized memory "
"pool (0x%08x).\n", alloc->location);
@@ -118,7 +118,7 @@ int xgi_free_ioctl(struct drm_device * dev, void * data,
int xgi_fb_heap_init(struct xgi_info * info)
{
int err;
-
+
mutex_lock(&info->dev->struct_mutex);
err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL,
XGI_FB_HEAP_START,
diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c
index 526bc5db..9a75581a 100644
--- a/linux-core/xgi_fence.c
+++ b/linux-core/xgi_fence.c
@@ -72,7 +72,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
- uint32_t flags, uint32_t * sequence,
+ uint32_t flags, uint32_t * sequence,
uint32_t * native_type)
{
struct xgi_info * info = dev->dev_private;
diff --git a/linux-core/xgi_ioc32.c b/linux-core/xgi_ioc32.c
index c54044fa..e4338417 100644
--- a/linux-core/xgi_ioc32.c
+++ b/linux-core/xgi_ioc32.c
@@ -43,7 +43,7 @@ struct drm_map32 {
u32 handle; /**< User-space: "Handle" to pass to mmap() */
int mtrr; /**< MTRR slot used */
};
-
+
struct drm32_xgi_bootstrap {
struct drm_map32 gart;
};
diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c
index 4a4a9844..2b3a1788 100644
--- a/linux-core/xgi_misc.c
+++ b/linux-core/xgi_misc.c
@@ -90,7 +90,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
DRM_WRITE8(map, 0xb057, 8);
while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) {
- while (0 != ((--time_out) & 0xfff))
+ while (0 != ((--time_out) & 0xfff))
/* empty */ ;
if (0 == time_out) {
@@ -117,8 +117,8 @@ static void xgi_ge_hang_reset(struct drm_map * map)
DRM_WRITE8(map, 0x3d4, 0x36);
old_36 = DRM_READ8(map, 0x3d5);
DRM_WRITE8(map, 0x3d5, old_36 | 0x10);
-
- while (0 != ((--time_out) & 0xfff))
+
+ while (0 != ((--time_out) & 0xfff))
/* empty */ ;
DRM_WRITE8(map, 0x3d5, old_36);
@@ -134,7 +134,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
DRM_WRITE8(map, 0xb057, 0);
}
-
+
bool xgi_ge_irq_handler(struct xgi_info * info)
{
const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
@@ -143,7 +143,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
/* Check GE on/off */
if (0 == (0xffffc0f0 & int_status)) {
if (0 != (0x1000 & int_status)) {
- /* We got GE stall interrupt.
+ /* We got GE stall interrupt.
*/
DRM_WRITE32(info->mmio_map, 0x2810,
cpu_to_le32(int_status | 0x04000000));
@@ -289,7 +289,7 @@ static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)
printk("%1x ", i);
for (j = 0; j < 0x10; j++) {
- u8 temp = DRM_READ8(info->mmio_map,
+ u8 temp = DRM_READ8(info->mmio_map,
regbase + (i * 0x10) + j);
printk("%3x", temp);
}
diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h
index af19a11a..5f9e4f09 100644
--- a/linux-core/xgi_misc.h
+++ b/linux-core/xgi_misc.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h
index 5c0100a0..a897fd8a 100644
--- a/linux-core/xgi_regs.h
+++ b/linux-core/xgi_regs.h
@@ -4,7 +4,7 @@
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
+ * a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,