author     Dave Airlie <airlied@linux.ie>    2007-12-10 15:53:59 +1000
committer  Dave Airlie <airlied@linux.ie>    2007-12-10 15:53:59 +1000
commit     814f695135f21aadeba77a3114df505d81a8d433 (patch)
tree       277da7e1c5dce9318591158435d2e367f3e495d3 /shared-core
parent     e51b3c8ff4bb88bc0f57473b7c3fe7fcd6b1a916 (diff)
parent     cfa21b22b43c7113107b5eb086b5f4d4ec36dc0a (diff)
Merge branch 'master' into r500-support
Diffstat (limited to 'shared-core')
-rw-r--r--  shared-core/drm.h             7
-rw-r--r--  shared-core/i915_dma.c       86
-rw-r--r--  shared-core/i915_drm.h        6
-rw-r--r--  shared-core/i915_irq.c       19
-rw-r--r--  shared-core/mach64_dma.c    255
-rw-r--r--  shared-core/mach64_drv.h    255
-rw-r--r--  shared-core/mach64_state.c   16
-rw-r--r--  shared-core/mga_dma.c        85
-rw-r--r--  shared-core/mga_irq.c         9
-rw-r--r--  shared-core/mga_state.c      12
-rw-r--r--  shared-core/nouveau_state.c  15
-rw-r--r--  shared-core/r128_drv.h        2
-rw-r--r--  shared-core/radeon_cp.c       2
-rw-r--r--  shared-core/radeon_drv.h      6
-rw-r--r--  shared-core/radeon_irq.c      3
-rw-r--r--  shared-core/radeon_state.c    8
-rw-r--r--  shared-core/savage_bci.c     20
-rw-r--r--  shared-core/savage_state.c  184
-rw-r--r--  shared-core/sis_drv.h         2
-rw-r--r--  shared-core/via_3d_reg.h      1
-rw-r--r--  shared-core/via_dma.c        40
-rw-r--r--  shared-core/via_drm.h         8
-rw-r--r--  shared-core/via_irq.c        25
-rw-r--r--  shared-core/via_verifier.c    4
-rw-r--r--  shared-core/via_verifier.h    8
25 files changed, 580 insertions(+), 498 deletions(-)
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 39414902..ec07b895 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -249,7 +249,8 @@ enum drm_map_flags {
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
- _DRM_REMOVABLE = 0x40 /**< Removable mapping */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
};
struct drm_ctx_priv_map {
@@ -646,7 +647,7 @@ struct drm_fence_arg {
unsigned int signaled;
unsigned int error;
unsigned int sequence;
- unsigned int pad64;
+ unsigned int pad64;
uint64_t expand_pad[2]; /*Future expansion */
};
@@ -878,7 +879,7 @@ struct drm_bo_version_arg {
struct drm_mm_type_arg {
unsigned int mem_type;
- unsigned int lock_flags;
+ unsigned int lock_flags;
};
struct drm_mm_init_arg {
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 8ce47e36..42114beb 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -51,8 +51,6 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->space >= n)
return 0;
- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
if (ring->head != last_head)
i = 0;
@@ -73,9 +71,6 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
-
- if (ring->head == ring->tail)
- dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
@@ -165,6 +160,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
* private backbuffer/depthbuffer usage.
*/
dev_priv->use_mi_batchbuffer_start = 0;
+ if (IS_I965G(dev)) /* 965 doesn't support older method */
+ dev_priv->use_mi_batchbuffer_start = 1;
/* Allow hardware batchbuffers unless told otherwise.
*/
@@ -202,7 +199,7 @@ static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
@@ -336,7 +333,7 @@ static int validate_cmd(int cmd)
return ret;
}
-static int i915_emit_cmds(struct drm_device * dev, int __user * buffer,
+static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -416,7 +413,7 @@ static int i915_emit_box(struct drm_device * dev,
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
- * emit. For now, do it in both places:
+ * emit. For now, do it in both places:
*/
void i915_emit_breadcrumb(struct drm_device *dev)
@@ -492,7 +489,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
return ret;
}
- i915_emit_breadcrumb( dev );
+ i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
@@ -546,7 +543,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
}
}
- i915_emit_breadcrumb( dev );
+ i915_emit_breadcrumb(dev);
#ifdef I915_HAVE_FENCE
drm_fence_flush_old(dev, 0, dev_priv->counter);
#endif
@@ -610,8 +607,7 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
- DRM_DEBUG("%s: planes=0x%x pfCurrentPage=%d\n",
- __FUNCTION__,
+ DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
planes, dev_priv->sarea_priv->pf_current_page);
i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
@@ -627,7 +623,7 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
#endif
}
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -916,7 +912,7 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
buffers[buf_count] = NULL;
- if (copy_from_user(&arg, (void __user *)(unsigned)data, sizeof(arg))) {
+ if (copy_from_user(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
ret = -EFAULT;
goto out_err;
}
@@ -967,7 +963,7 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
arg.handled = 1;
arg.d.rep = rep;
- if (copy_to_user((void __user *)(unsigned)data, &arg, sizeof(arg)))
+ if (copy_to_user((void __user *)(unsigned long)data, &arg, sizeof(arg)))
return -EFAULT;
data = next;
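
Both hunks above fix the same 64-bit bug: going through "unsigned" (32 bits) truncates the user pointer before it reaches copy_from_user()/copy_to_user(), while "unsigned long" matches pointer width on LP64. A standalone illustration, not driver code, with a made-up address:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t data = 0x00007f1234567890ull;	/* a 64-bit user address */
		void *bad  = (void *)(uintptr_t)(unsigned)data;      /* upper 32 bits lost */
		void *good = (void *)(uintptr_t)(unsigned long)data; /* full width on LP64 */
		printf("truncated: %p  full: %p\n", bad, good);
		return 0;
	}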
@@ -1032,10 +1028,10 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER);
if (!buffers) {
- drm_bo_read_unlock(&dev->bm.bm_lock);
+ drm_bo_read_unlock(&dev->bm.bm_lock);
mutex_unlock(&dev_priv->cmdbuf_mutex);
return -ENOMEM;
- }
+ }
/* validate buffer list + fixup relocations */
ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
@@ -1095,13 +1091,13 @@ static int i915_do_cleanup_pageflip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
for (i = 0, planes = 0; i < 2; i++)
if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
dev_priv->sarea_priv->pf_current_page =
(dev_priv->sarea_priv->pf_current_page &
- ~(0x3 << (2 * i))) | (num_pages - 1) << (2 * i);
+ ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));
planes |= 1 << i;
}
@@ -1116,7 +1112,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *f
{
drm_i915_flip_t *param = data;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1181,7 +1177,8 @@ static int i915_setparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
- dev_priv->use_mi_batchbuffer_start = param->value;
+ if (!IS_I965G(dev))
+ dev_priv->use_mi_batchbuffer_start = param->value;
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
dev_priv->tex_lru_log_granularity = param->value;
@@ -1229,27 +1226,27 @@ static int i915_mmio(struct drm_device *dev, void *data,
base = (u8 *) dev_priv->mmio_map->handle + e->offset;
switch (mmio->read_write) {
- case I915_MMIO_READ:
- if (!(e->flag & I915_MMIO_MAY_READ))
- return -EINVAL;
- for (i = 0; i < e->size / 4; i++)
- buf[i] = I915_READ(e->offset + i * 4);
- if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
- DRM_ERROR("DRM_COPY_TO_USER failed\n");
- return -EFAULT;
- }
- break;
-
- case I915_MMIO_WRITE:
- if (!(e->flag & I915_MMIO_MAY_WRITE))
- return -EINVAL;
- if(DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
- DRM_ERROR("DRM_COPY_TO_USER failed\n");
- return -EFAULT;
- }
- for (i = 0; i < e->size / 4; i++)
- I915_WRITE(e->offset + i * 4, buf[i]);
- break;
+ case I915_MMIO_READ:
+ if (!(e->flag & I915_MMIO_MAY_READ))
+ return -EINVAL;
+ for (i = 0; i < e->size / 4; i++)
+ buf[i] = I915_READ(e->offset + i * 4);
+ if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
+ DRM_ERROR("DRM_COPY_TO_USER failed\n");
+ return -EFAULT;
+ }
+ break;
+
+ case I915_MMIO_WRITE:
+ if (!(e->flag & I915_MMIO_MAY_WRITE))
+ return -EINVAL;
+ if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
+ DRM_ERROR("DRM_COPY_TO_USER failed\n");
+ return -EFAULT;
+ }
+ for (i = 0; i < e->size / 4; i++)
+ I915_WRITE(e->offset + i * 4, buf[i]);
+ break;
}
return 0;
}
@@ -1317,14 +1314,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);
- ret = drm_addmap(dev, base, size, _DRM_REGISTERS, _DRM_KERNEL,
- &dev_priv->mmio_map);
+ ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+ _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_init_chipset_flush_compat(dev);
#endif
#endif
+
return ret;
}
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 56977ff3..cfa3f93a 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -178,6 +178,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
/* Asynchronous page flipping:
@@ -274,7 +275,7 @@ typedef struct drm_i915_mem_init_heap {
* rotate):
*/
typedef struct drm_i915_mem_destroy_heap {
- int region;
+ int region;
} drm_i915_mem_destroy_heap_t;
/* Allow X server to configure which pipes to monitor for vblank signals
@@ -314,7 +315,7 @@ typedef struct drm_i915_mmio_entry {
unsigned int flag;
unsigned int offset;
unsigned int size;
-}drm_i915_mmio_entry_t;
+} drm_i915_mmio_entry_t;
typedef struct drm_i915_mmio {
unsigned int read_write:1;
@@ -359,6 +360,7 @@ struct drm_i915_execbuffer {
uint64_t ops_list;
uint32_t num_buffers;
struct _drm_i915_batchbuffer batch;
+ drm_context_t context; /* for lockless use in the future */
struct drm_fence_arg fence_arg;
};
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 2c699ecd..ee7c40b5 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -417,8 +417,6 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (READ_BREADCRUMB(dev_priv) >= irq_nr)
return 0;
- dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-
i915_user_irq_on(dev_priv);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
@@ -458,12 +456,25 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev,
int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
+ atomic_t *counter;
+
+ if (i915_get_pipe(dev, 0) == 0)
+ counter = &dev->vbl_received;
+ else
+ counter = &dev->vbl_received2;
+ return i915_driver_vblank_do_wait(dev, sequence, counter);
}
int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
- return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
+ atomic_t *counter;
+
+ if (i915_get_pipe(dev, 1) == 0)
+ counter = &dev->vbl_received;
+ else
+ counter = &dev->vbl_received2;
+
+ return i915_driver_vblank_do_wait(dev, sequence, counter);
}
/* Needs the lock as it touches the ring.
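
The two hunks above stop hard-wiring counter 1 to vblank_wait and counter 2 to vblank_wait2: each entry point now asks i915_get_pipe() which pipe its plane is bound to and picks the matching counter. A sketch of the resulting mapping, assuming (as the code suggests) that i915_get_pipe() returns 0 for pipe A and 1 for pipe B:

	/* entry point                plane's pipe    counter used
	 * --------------------------------------------------------
	 * i915_driver_vblank_wait         A          dev->vbl_received
	 * i915_driver_vblank_wait         B          dev->vbl_received2
	 * i915_driver_vblank_wait2        A          dev->vbl_received
	 * i915_driver_vblank_wait2        B          dev->vbl_received2
	 */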
diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c
index 13fa0446..411b98d5 100644
--- a/shared-core/mach64_dma.c
+++ b/shared-core/mach64_dma.c
@@ -6,7 +6,7 @@
* \author Gareth Hughes <gareth@valinux.com>
* \author Frank C. Earl <fearl@airmail.net>
* \author Leif Delgass <ldelgass@retinalburn.net>
- * \author Jose Fonseca <j_r_fonseca@yahoo.co.uk>
+ * \author José Fonseca <j_r_fonseca@yahoo.co.uk>
*/
/*
@@ -559,6 +559,259 @@ void mach64_dump_ring_info(drm_mach64_private_t * dev_priv)
/*******************************************************************/
+/** \name DMA descriptor ring macros */
+/*@{*/
+
+/**
+ * Add the end mark to the ring's new tail position.
+ *
+ * The bus master engine will keep processing the DMA buffers listed in the ring
+ * until it finds this mark, making it stop.
+ *
+ * \sa mach64_clear_dma_eol
+ */
+static __inline__ void mach64_set_dma_eol(volatile u32 * addr)
+{
+#if defined(__i386__)
+ int nr = 31;
+
+ /* Taken from include/asm-i386/bitops.h linux header */
+ __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
+ :"Ir"(nr));
+#elif defined(__powerpc__)
+ u32 old;
+ u32 mask = cpu_to_le32(MACH64_DMA_EOL);
+
+ /* Taken from the include/asm-ppc/bitops.h linux header */
+ __asm__ __volatile__("\n\
+1: lwarx %0,0,%3 \n\
+ or %0,%0,%2 \n\
+ stwcx. %0,0,%3 \n\
+ bne- 1b":"=&r"(old), "=m"(*addr)
+ :"r"(mask), "r"(addr), "m"(*addr)
+ :"cc");
+#elif defined(__alpha__)
+ u32 temp;
+ u32 mask = MACH64_DMA_EOL;
+
+ /* Taken from the include/asm-alpha/bitops.h linux header */
+ __asm__ __volatile__("1: ldl_l %0,%3\n"
+ " bis %0,%2,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous":"=&r"(temp), "=m"(*addr)
+ :"Ir"(mask), "m"(*addr));
+#else
+ u32 mask = cpu_to_le32(MACH64_DMA_EOL);
+
+ *addr |= mask;
+#endif
+}
+
+/**
+ * Remove the end mark from the ring's old tail position.
+ *
+ * It should be called after calling mach64_set_dma_eol to mark the ring's new
+ * tail position.
+ *
+ * We update the end marks while the bus master engine is in operation. Since
+ * the bus master engine may potentially be reading from the same position
+ * that we write, we must make the change atomically to avoid exposing
+ * intermediate bad data.
+ */
+static __inline__ void mach64_clear_dma_eol(volatile u32 * addr)
+{
+#if defined(__i386__)
+ int nr = 31;
+
+ /* Taken from include/asm-i386/bitops.h linux header */
+ __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
+ :"Ir"(nr));
+#elif defined(__powerpc__)
+ u32 old;
+ u32 mask = cpu_to_le32(MACH64_DMA_EOL);
+
+ /* Taken from the include/asm-ppc/bitops.h linux header */
+ __asm__ __volatile__("\n\
+1: lwarx %0,0,%3 \n\
+ andc %0,%0,%2 \n\
+ stwcx. %0,0,%3 \n\
+ bne- 1b":"=&r"(old), "=m"(*addr)
+ :"r"(mask), "r"(addr), "m"(*addr)
+ :"cc");
+#elif defined(__alpha__)
+ u32 temp;
+ u32 mask = ~MACH64_DMA_EOL;
+
+ /* Taken from the include/asm-alpha/bitops.h linux header */
+ __asm__ __volatile__("1: ldl_l %0,%3\n"
+ " and %0,%2,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous":"=&r"(temp), "=m"(*addr)
+ :"Ir"(mask), "m"(*addr));
+#else
+ u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
+
+ *addr &= mask;
+#endif
+}
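
For readers not fluent in the per-architecture assembly: both helpers are lock-free read-modify-write updates of bit 31, matching the "nr = 31" in the x86 path. A minimal modern sketch with C11 atomics, purely for illustration (C11 was not an option for this portable 2007 code, and the sketch ignores the cpu_to_le32() byte-order handling the real code needs):

	#include <stdatomic.h>
	#include <stdint.h>

	#define DMA_EOL_BIT (UINT32_C(1) << 31)	/* the end-of-list mark lives in bit 31 */

	static inline void set_eol(_Atomic uint32_t *addr)
	{
		atomic_fetch_or(addr, DMA_EOL_BIT);	/* role of mach64_set_dma_eol() */
	}

	static inline void clear_eol(_Atomic uint32_t *addr)
	{
		atomic_fetch_and(addr, ~DMA_EOL_BIT);	/* role of mach64_clear_dma_eol() */
	}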
+
+#define RING_LOCALS \
+ int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring
+
+#define RING_WRITE_OFS _ring_write
+
+#define BEGIN_RING( n ) \
+do { \
+ if ( MACH64_VERBOSE ) { \
+ DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
+ (n), __FUNCTION__ ); \
+ } \
+ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
+ int ret; \
+ if ((ret=mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
+ DRM_ERROR( "wait_ring failed, resetting engine\n"); \
+ mach64_dump_engine_info( dev_priv ); \
+ mach64_do_engine_reset( dev_priv ); \
+ return ret; \
+ } \
+ } \
+ dev_priv->ring.space -= (n) * sizeof(u32); \
+ _ring = (u32 *) dev_priv->ring.start; \
+ _ring_tail = _ring_write = dev_priv->ring.tail; \
+ _ring_mask = dev_priv->ring.tail_mask; \
+} while (0)
+
+#define OUT_RING( x ) \
+do { \
+ if ( MACH64_VERBOSE ) { \
+ DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
+ (unsigned int)(x), _ring_write ); \
+ } \
+ _ring[_ring_write++] = cpu_to_le32( x ); \
+ _ring_write &= _ring_mask; \
+} while (0)
+
+#define ADVANCE_RING() \
+do { \
+ if ( MACH64_VERBOSE ) { \
+ DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
+ _ring_write, _ring_tail ); \
+ } \
+ DRM_MEMORYBARRIER(); \
+ mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] ); \
+ DRM_MEMORYBARRIER(); \
+ dev_priv->ring.tail = _ring_write; \
+ mach64_ring_tick( dev_priv, &(dev_priv)->ring ); \
+} while (0)
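
The ordering inside ADVANCE_RING() is what makes the EOL helpers above safe to use while the engine runs: the new descriptors, whose last dword already carries the new end mark (written by OUT_RING), are flushed first, and only then is the end mark on the previous tail cleared. A sketch of the only two ring states the engine can observe:

	/* before clearing:  ... desc_N [EOL] desc_N+1 ... desc_M [EOL]
	 * after clearing:   ... desc_N       desc_N+1 ... desc_M [EOL]
	 *
	 * Either way the engine stops at a valid end mark; there is never
	 * a moment with no mark or a half-linked descriptor exposed.
	 */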
+
+/**
+ * Queue a DMA buffer of register writes into the ring buffer.
+ */
+int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
+ drm_mach64_freelist_t *entry)
+{
+ int bytes, pages, remainder;
+ u32 address, page;
+ int i;
+ struct drm_buf *buf = entry->buf;
+ RING_LOCALS;
+
+ bytes = buf->used;
+ address = GETBUFADDR( buf );
+ pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
+
+ BEGIN_RING( pages * 4 );
+
+ for ( i = 0 ; i < pages-1 ; i++ ) {
+ page = address + i * MACH64_DMA_CHUNKSIZE;
+ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
+ OUT_RING( page );
+ OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
+ OUT_RING( 0 );
+ }
+
+ /* generate the final descriptor for any remaining commands in this buffer */
+ page = address + i * MACH64_DMA_CHUNKSIZE;
+ remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
+
+ /* Save dword offset of last descriptor for this buffer.
+ * This is needed to check for completion of the buffer in freelist_get
+ */
+ entry->ring_ofs = RING_WRITE_OFS;
+
+ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
+ OUT_RING( page );
+ OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
+ OUT_RING( 0 );
+
+ ADVANCE_RING();
+
+ return 0;
+}
+
+/**
+ * Queue a DMA buffer controlling host data transfers (e.g., blit).
+ *
+ * Almost identical to mach64_add_buf_to_ring.
+ */
+int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
+ drm_mach64_freelist_t *entry)
+{
+ int bytes, pages, remainder;
+ u32 address, page;
+ int i;
+ struct drm_buf *buf = entry->buf;
+ RING_LOCALS;
+
+ bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET;
+ pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;
+ address = GETBUFADDR( buf );
+
+ BEGIN_RING( 4 + pages * 4 );
+
+ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );
+ OUT_RING( address );
+ OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );
+ OUT_RING( 0 );
+ address += MACH64_HOSTDATA_BLIT_OFFSET;
+
+ for ( i = 0 ; i < pages-1 ; i++ ) {
+ page = address + i * MACH64_DMA_CHUNKSIZE;
+ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
+ OUT_RING( page );
+ OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );
+ OUT_RING( 0 );
+ }
+
+ /* generate the final descriptor for any remaining commands in this buffer */
+ page = address + i * MACH64_DMA_CHUNKSIZE;
+ remainder = bytes - i * MACH64_DMA_CHUNKSIZE;
+
+ /* Save dword offset of last descriptor for this buffer.
+ * This is needed to check for completion of the buffer in freelist_get
+ */
+ entry->ring_ofs = RING_WRITE_OFS;
+
+ OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );
+ OUT_RING( page );
+ OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );
+ OUT_RING( 0 );
+
+ ADVANCE_RING();
+
+ return 0;
+}
+
+/*@}*/
+
+
+/*******************************************************************/
/** \name DMA test and initialization */
/*@{*/
diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h
index 79c2c61d..1768a2a4 100644
--- a/shared-core/mach64_drv.h
+++ b/shared-core/mach64_drv.h
@@ -29,7 +29,7 @@
* Gareth Hughes <gareth@valinux.com>
* Frank C. Earl <fearl@airmail.net>
* Leif Delgass <ldelgass@retinalburn.net>
- * Jos�Fonseca <j_r_fonseca@yahoo.co.uk>
+ * José Fonseca <j_r_fonseca@yahoo.co.uk>
*/
#ifndef __MACH64_DRV_H__
@@ -140,6 +140,11 @@ extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv);
extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv);
extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv);
+extern int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv,
+ drm_mach64_freelist_t *_entry);
+extern int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv,
+ drm_mach64_freelist_t *_entry);
+
extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv);
extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv);
extern int mach64_do_cleanup_dma(struct drm_device * dev);
@@ -521,89 +526,12 @@ extern void mach64_driver_irq_uninstall(struct drm_device * dev);
#define MACH64_APERTURE_OFFSET 0x7ff800 /* frame-buffer offset for gui-masters */
/* ================================================================
- * Misc helper macros
+ * Ring operations
+ *
+ * Since the Mach64 bus master engine requires polling, these functions end
+ * up being called frequently, hence they are inlined.
*/
-static __inline__ void mach64_set_dma_eol(volatile u32 * addr)
-{
-#if defined(__i386__)
- int nr = 31;
-
- /* Taken from include/asm-i386/bitops.h linux header */
- __asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr)
- :"Ir"(nr));
-#elif defined(__powerpc__)
- u32 old;
- u32 mask = cpu_to_le32(MACH64_DMA_EOL);
-
- /* Taken from the include/asm-ppc/bitops.h linux header */
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3 \n\
- or %0,%0,%2 \n\
- stwcx. %0,0,%3 \n\
- bne- 1b":"=&r"(old), "=m"(*addr)
- :"r"(mask), "r"(addr), "m"(*addr)
- :"cc");
-#elif defined(__alpha__)
- u32 temp;
- u32 mask = MACH64_DMA_EOL;
-
- /* Taken from the include/asm-alpha/bitops.h linux header */
- __asm__ __volatile__("1: ldl_l %0,%3\n"
- " bis %0,%2,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous":"=&r"(temp), "=m"(*addr)
- :"Ir"(mask), "m"(*addr));
-#else
- u32 mask = cpu_to_le32(MACH64_DMA_EOL);
-
- *addr |= mask;
-#endif
-}
-
-static __inline__ void mach64_clear_dma_eol(volatile u32 * addr)
-{
-#if defined(__i386__)
- int nr = 31;
-
- /* Taken from include/asm-i386/bitops.h linux header */
- __asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr)
- :"Ir"(nr));
-#elif defined(__powerpc__)
- u32 old;
- u32 mask = cpu_to_le32(MACH64_DMA_EOL);
-
- /* Taken from the include/asm-ppc/bitops.h linux header */
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3 \n\
- andc %0,%0,%2 \n\
- stwcx. %0,0,%3 \n\
- bne- 1b":"=&r"(old), "=m"(*addr)
- :"r"(mask), "r"(addr), "m"(*addr)
- :"cc");
-#elif defined(__alpha__)
- u32 temp;
- u32 mask = ~MACH64_DMA_EOL;
-
- /* Taken from the include/asm-alpha/bitops.h linux header */
- __asm__ __volatile__("1: ldl_l %0,%3\n"
- " and %0,%2,%0\n"
- " stl_c %0,%1\n"
- " beq %0,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous":"=&r"(temp), "=m"(*addr)
- :"Ir"(mask), "m"(*addr));
-#else
- u32 mask = cpu_to_le32(~MACH64_DMA_EOL);
-
- *addr &= mask;
-#endif
-}
-
static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
@@ -666,6 +594,18 @@ static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,
}
}
+/**
+ * Poll the ring head and make sure the bus master is alive.
+ *
+ * Mach64's bus master engine will stop if there are no more entries to process.
+ * This function polls the engine for the last processed entry and calls
+ * mach64_ring_resume if there is an unprocessed entry.
+ *
+ * Note also that, since we update the ring tail while the bus master engine is
+ * in operation, it is possible that the last tail update was too late to be
+ * processed, and the bus master engine stops at the previous tail position.
+ * Therefore it is important to call this function frequently.
+ */
static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv,
drm_mach64_descriptor_ring_t * ring)
{
@@ -750,60 +690,12 @@ mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)
}
/* ================================================================
- * DMA descriptor ring macros
- */
-
-#define RING_LOCALS \
- int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring
-
-#define RING_WRITE_OFS _ring_write
-
-#define BEGIN_RING( n ) \
-do { \
- if ( MACH64_VERBOSE ) { \
- DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
- (n), __FUNCTION__ ); \
- } \
- if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
- int ret; \
- if ((ret=mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \
- DRM_ERROR( "wait_ring failed, resetting engine\n"); \
- mach64_dump_engine_info( dev_priv ); \
- mach64_do_engine_reset( dev_priv ); \
- return ret; \
- } \
- } \
- dev_priv->ring.space -= (n) * sizeof(u32); \
- _ring = (u32 *) dev_priv->ring.start; \
- _ring_tail = _ring_write = dev_priv->ring.tail; \
- _ring_mask = dev_priv->ring.tail_mask; \
-} while (0)
-
-#define OUT_RING( x ) \
-do { \
- if ( MACH64_VERBOSE ) { \
- DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
- (unsigned int)(x), _ring_write ); \
- } \
- _ring[_ring_write++] = cpu_to_le32( x ); \
- _ring_write &= _ring_mask; \
-} while (0)
-
-#define ADVANCE_RING() \
-do { \
- if ( MACH64_VERBOSE ) { \
- DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
- _ring_write, _ring_tail ); \
- } \
- DRM_MEMORYBARRIER(); \
- mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] ); \
- DRM_MEMORYBARRIER(); \
- dev_priv->ring.tail = _ring_write; \
- mach64_ring_tick( dev_priv, &(dev_priv)->ring ); \
-} while (0)
-
-/* ================================================================
* DMA macros
+ *
+ * Mach64's ring buffer doesn't take register writes directly. These
+ * have to be written indirectly in DMA buffers. These macros simplify
+ * the task of setting up a buffer, writing commands to it, and
+ * queuing the buffer in the ring.
*/
#define DMALOCALS \
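
A hedged sketch of the call pattern these macros produce; DMAGETPTR()/DMAOUTREG() are the companion macros defined elsewhere in this header, and the register names are only illustrative:

	/*
	 *	DMALOCALS;
	 *	DMAGETPTR(file_priv, dev_priv, 2);	// grab a private DMA buffer
	 *	DMAOUTREG(MACH64_Z_CNTL, z_cntl);	// queue register writes into it
	 *	DMAOUTREG(MACH64_SCALE_3D_CNTL, s3d);
	 *	DMAADVANCE(dev_priv, 1);		// link the buffer into the ring
	 */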
@@ -889,7 +781,7 @@ do { \
#define DMAADVANCE( dev_priv, _discard ) \
do { \
struct list_head *ptr; \
- RING_LOCALS; \
+ int ret; \
\
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAADVANCE() in %s\n", __FUNCTION__ ); \
@@ -902,7 +794,6 @@ do { \
} \
if (_buf->pending) { \
/* This is a reused buffer, so we need to find it in the pending list */ \
- int ret; \
if ( (ret=mach64_find_pending_buf_entry(dev_priv, &_entry, _buf)) ) { \
DRM_ERROR( "DMAADVANCE() in %s: couldn't find pending buf %d\n", \
__FUNCTION__, _buf->idx ); \
@@ -927,7 +818,8 @@ do { \
list_add_tail(ptr, &dev_priv->pending); \
} \
_entry->discard = (_discard); \
- ADD_BUF_TO_RING( dev_priv ); \
+ if ( (ret = mach64_add_buf_to_ring( dev_priv, _entry )) ) \
+ return ret; \
} while (0)
#define DMADISCARDBUF() \
@@ -943,48 +835,10 @@ do { \
_entry->discard = 1; \
} while(0)
-#define ADD_BUF_TO_RING( dev_priv ) \
-do { \
- int bytes, pages, remainder; \
- u32 address, page; \
- int i; \
- \
- bytes = _buf->used; \
- address = GETBUFADDR( _buf ); \
- \
- pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; \
- \
- BEGIN_RING( pages * 4 ); \
- \
- for ( i = 0 ; i < pages-1 ; i++ ) { \
- page = address + i * MACH64_DMA_CHUNKSIZE; \
- OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); \
- OUT_RING( page ); \
- OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); \
- OUT_RING( 0 ); \
- } \
- \
- /* generate the final descriptor for any remaining commands in this buffer */ \
- page = address + i * MACH64_DMA_CHUNKSIZE; \
- remainder = bytes - i * MACH64_DMA_CHUNKSIZE; \
- \
- /* Save dword offset of last descriptor for this buffer. \
- * This is needed to check for completion of the buffer in freelist_get \
- */ \
- _entry->ring_ofs = RING_WRITE_OFS; \
- \
- OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); \
- OUT_RING( page ); \
- OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); \
- OUT_RING( 0 ); \
- \
- ADVANCE_RING(); \
-} while(0)
-
#define DMAADVANCEHOSTDATA( dev_priv ) \
do { \
struct list_head *ptr; \
- RING_LOCALS; \
+ int ret; \
\
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAADVANCEHOSTDATA() in %s\n", __FUNCTION__ ); \
@@ -1008,51 +862,8 @@ do { \
_entry->buf->pending = 1; \
list_add_tail(ptr, &dev_priv->pending); \
_entry->discard = 1; \
- ADD_HOSTDATA_BUF_TO_RING( dev_priv ); \
+ if ( (ret = mach64_add_hostdata_buf_to_ring( dev_priv, _entry )) ) \
+ return ret; \
} while (0)
-#define ADD_HOSTDATA_BUF_TO_RING( dev_priv ) \
-do { \
- int bytes, pages, remainder; \
- u32 address, page; \
- int i; \
- \
- bytes = _buf->used - MACH64_HOSTDATA_BLIT_OFFSET; \
- pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; \
- address = GETBUFADDR( _buf ); \
- \
- BEGIN_RING( 4 + pages * 4 ); \
- \
- OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); \
- OUT_RING( address ); \
- OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET ); \
- OUT_RING( 0 ); \
- \
- address += MACH64_HOSTDATA_BLIT_OFFSET; \
- \
- for ( i = 0 ; i < pages-1 ; i++ ) { \
- page = address + i * MACH64_DMA_CHUNKSIZE; \
- OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); \
- OUT_RING( page ); \
- OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); \
- OUT_RING( 0 ); \
- } \
- \
- /* generate the final descriptor for any remaining commands in this buffer */ \
- page = address + i * MACH64_DMA_CHUNKSIZE; \
- remainder = bytes - i * MACH64_DMA_CHUNKSIZE; \
- \
- /* Save dword offset of last descriptor for this buffer. \
- * This is needed to check for completion of the buffer in freelist_get \
- */ \
- _entry->ring_ofs = RING_WRITE_OFS; \
- \
- OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); \
- OUT_RING( page ); \
- OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); \
- OUT_RING( 0 ); \
- \
- ADVANCE_RING(); \
-} while(0)
-
#endif /* __MACH64_DRV_H__ */
diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c
index 89b6c6ce..88ff4843 100644
--- a/shared-core/mach64_state.c
+++ b/shared-core/mach64_state.c
@@ -27,7 +27,7 @@
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Leif Delgass <ldelgass@retinalburn.net>
- * Jos�Fonseca <j_r_fonseca@yahoo.co.uk>
+ * José Fonseca <j_r_fonseca@yahoo.co.uk>
*/
#include "drmP.h"
@@ -575,6 +575,10 @@ static int mach64_dma_dispatch_vertex(struct drm_device * dev,
return -EAGAIN;
}
+ /* Mach64's vertex data is actually register writes. To avoid security
+ * compromises these register writes have to be verified and copied from
+ * user space into a private DMA buffer.
+ */
verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);
if (verify_ret != 0) {
@@ -698,6 +702,16 @@ static int mach64_dma_dispatch_blit(struct drm_device * dev,
return -EAGAIN;
}
+ /* Copy the blit data from userspace.
+ *
+ * XXX: This is overkill. The most efficient solution would be having
+ * two sets of buffers (one set private for vertex data, the other set
+ * client-writable for blits). However that would bring more complexity
+ * and would break backward compatibility. The solution currently
+ * implemented keeps all buffers private, making it possible to secure
+ * the driver without added complexity, at the expense of some speed
+ * when transferring data.
+ */
verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);
if (verify_ret != 0) {
diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c
index 44b14945..67236b2d 100644
--- a/shared-core/mga_dma.c
+++ b/shared-core/mga_dma.c
@@ -46,7 +46,7 @@
#define MINIMAL_CLEANUP 0
#define FULL_CLEANUP 1
-static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup);
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
/* ================================================================
* Engine control
@@ -395,7 +395,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
- drm_mga_private_t * dev_priv;
+ drm_mga_private_t *dev_priv;
dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
if (!dev_priv)
@@ -436,10 +436,11 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
- drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
+ drm_mga_private_t *const dev_priv =
+ (drm_mga_private_t *)dev->dev_private;
unsigned int warp_size = mga_warp_microcode_size(dev_priv);
int err;
- unsigned offset;
+ unsigned offset;
const unsigned secondary_size = dma_bs->secondary_bin_count
* dma_bs->secondary_bin_size;
const unsigned agp_size = (dma_bs->agp_size << 20);
@@ -481,11 +482,10 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
}
}
-
/* Allocate and bind AGP memory. */
agp_req.size = agp_size;
agp_req.type = 0;
- err = drm_agp_alloc( dev, & agp_req );
+ err = drm_agp_alloc(dev, &agp_req);
if (err) {
dev_priv->agp_size = 0;
DRM_ERROR("Unable to allocate %uMB AGP memory\n",
@@ -511,36 +511,36 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
warp_size = PAGE_SIZE;
offset = 0;
- err = drm_addmap( dev, offset, warp_size,
- _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
+ err = drm_addmap(dev, offset, warp_size,
+ _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
if (err) {
DRM_ERROR("Unable to map WARP microcode: %d\n", err);
return err;
}
offset += warp_size;
- err = drm_addmap( dev, offset, dma_bs->primary_size,
- _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
+ err = drm_addmap(dev, offset, dma_bs->primary_size,
+ _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary);
if (err) {
DRM_ERROR("Unable to map primary DMA region: %d\n", err);
return err;
}
offset += dma_bs->primary_size;
- err = drm_addmap( dev, offset, secondary_size,
- _DRM_AGP, 0, & dev->agp_buffer_map );
+ err = drm_addmap(dev, offset, secondary_size,
+ _DRM_AGP, 0, & dev->agp_buffer_map);
if (err) {
DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
return err;
}
- (void) memset( &req, 0, sizeof(req) );
+ (void)memset( &req, 0, sizeof(req) );
req.count = dma_bs->secondary_bin_count;
req.size = dma_bs->secondary_bin_size;
req.flags = _DRM_AGP_BUFFER;
req.agp_start = offset;
- err = drm_addbufs_agp( dev, & req );
+ err = drm_addbufs_agp(dev, &req);
if (err) {
DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
return err;
@@ -563,8 +563,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
#endif
offset += secondary_size;
- err = drm_addmap( dev, offset, agp_size - offset,
- _DRM_AGP, 0, & dev_priv->agp_textures );
+ err = drm_addmap(dev, offset, agp_size - offset,
+ _DRM_AGP, 0, & dev_priv->agp_textures);
if (err) {
DRM_ERROR("Unable to map AGP texture region: %d\n", err);
return err;
@@ -606,7 +606,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
- drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
+ drm_mga_private_t *const dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
unsigned int warp_size = mga_warp_microcode_size(dev_priv);
unsigned int primary_size;
unsigned int bin_count;
@@ -639,9 +640,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
* alignment of the primary or secondary DMA buffers.
*/
- for ( primary_size = dma_bs->primary_size
- ; primary_size != 0
- ; primary_size >>= 1 ) {
+ for (primary_size = dma_bs->primary_size; primary_size != 0;
+ primary_size >>= 1 ) {
/* The proper alignment for this mapping is 0x04 */
err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
_DRM_READ_ONLY, &dev_priv->primary);
@@ -657,18 +657,17 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
if (dev_priv->primary->size != dma_bs->primary_size) {
DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
dma_bs->primary_size,
- (unsigned) dev_priv->primary->size);
+ (unsigned)dev_priv->primary->size);
dma_bs->primary_size = dev_priv->primary->size;
}
- for ( bin_count = dma_bs->secondary_bin_count
- ; bin_count > 0
- ; bin_count-- ) {
- (void) memset( &req, 0, sizeof(req) );
+ for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
+ bin_count-- ) {
+ (void)memset(&req, 0, sizeof(req));
req.count = bin_count;
req.size = dma_bs->secondary_bin_size;
- err = drm_addbufs_pci( dev, & req );
+ err = drm_addbufs_pci(dev, &req);
if (!err) {
break;
}
@@ -696,12 +695,12 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
}
-static int mga_do_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
int err;
- drm_mga_private_t * const dev_priv =
+ drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -710,17 +709,17 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
/* The first steps are the same for both PCI and AGP based DMA. Map
* the cards MMIO registers and map a status page.
*/
- err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size,
- _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio );
+ err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
+ _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio);
if (err) {
DRM_ERROR("Unable to map MMIO region: %d\n", err);
return err;
}
- err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM,
- _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
- & dev_priv->status );
+ err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+ _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
+ & dev_priv->status);
if (err) {
DRM_ERROR("Unable to map status region: %d\n", err);
return err;
@@ -768,7 +767,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data,
drm_mga_dma_bootstrap_t *bootstrap = data;
int err;
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
- const drm_mga_private_t * const dev_priv =
+ const drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -829,7 +828,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
return -EINVAL;
}
- if (! dev_priv->used_new_dma_init) {
+ if (!dev_priv->used_new_dma_init) {
dev_priv->dma_access = MGA_PAGPXFER;
dev_priv->wagp_enable = MGA_WAGP_ENABLE;
@@ -855,7 +854,8 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+ dev->agp_buffer_map =
+ drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("failed to find dma buffer region!\n");
return -EINVAL;
@@ -898,10 +898,6 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
/* Init the primary DMA registers.
*/
MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
-#if 0
- MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
- MGA_PRIMPTREN1); /* DWGSYNC */
-#endif
dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
@@ -932,7 +928,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
return 0;
}
-static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup)
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
int err = 0;
DRM_DEBUG("\n");
@@ -993,7 +989,8 @@ static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup)
memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
dev_priv->warp_pipe = 0;
- memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
+ memset(dev_priv->warp_pipe_phys, 0,
+ sizeof(dev_priv->warp_pipe_phys));
if (dev_priv->head != NULL) {
mga_freelist_cleanup(dev);
@@ -1015,7 +1012,7 @@ int mga_dma_init(struct drm_device *dev, void *data,
case MGA_INIT_DMA:
err = mga_do_init_dma(dev, init);
if (err) {
- (void) mga_do_cleanup_dma(dev, FULL_CLEANUP);
+ (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
return err;
case MGA_CLEANUP_DMA:
diff --git a/shared-core/mga_irq.c b/shared-core/mga_irq.c
index c18bae9f..0f83577f 100644
--- a/shared-core/mga_irq.c
+++ b/shared-core/mga_irq.c
@@ -57,7 +57,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* SOFTRAP interrupt */
if (status & MGA_SOFTRAPEN) {
const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
- const u32 prim_end = MGA_READ(MGA_PRIMEND);
+ const u32 prim_end = MGA_READ(MGA_PRIMEND);
MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
@@ -65,7 +65,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* In addition to clearing the interrupt-pending bit, we
* have to write to MGA_PRIMEND to re-start the DMA operation.
*/
- if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) {
+ if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
MGA_WRITE(MGA_PRIMEND, prim_end);
}
@@ -74,9 +74,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
handled = 1;
}
- if ( handled ) {
+ if (handled)
return IRQ_HANDLED;
- }
return IRQ_NONE;
}
@@ -131,7 +130,7 @@ void mga_driver_irq_postinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- DRM_INIT_WAITQUEUE( &dev_priv->fence_queue );
+ DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
/* Turn on vertical blank interrupt and soft trap interrupt. */
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c
index 0b8ba048..d9cc9d7e 100644
--- a/shared-core/mga_state.c
+++ b/shared-core/mga_state.c
@@ -62,8 +62,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
}
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
- MGA_YTOP, box->y1 * pitch,
- MGA_YBOT, (box->y2 - 1) * pitch);
+ MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);
ADVANCE_DMA();
}
@@ -78,18 +77,15 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
MGA_MACCESS, ctx->maccess,
- MGA_PLNWT, ctx->plnwt,
- MGA_DWGCTL, ctx->dwgctl);
+ MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
MGA_FOGCOL, ctx->fogcolor,
- MGA_WFLAG, ctx->wflag,
- MGA_ZORG, dev_priv->depth_offset);
+ MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
DMA_BLOCK(MGA_FCOL, ctx->fcol,
MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000);
+ MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
ADVANCE_DMA();
}
diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c
index 7c9503e8..16c86494 100644
--- a/shared-core/nouveau_state.c
+++ b/shared-core/nouveau_state.c
@@ -454,6 +454,9 @@ int nouveau_firstopen(struct drm_device *dev)
return 0;
}
+#define NV40_CHIPSET_MASK 0x00000baf
+#define NV44_CHIPSET_MASK 0x00005450
+
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
@@ -497,10 +500,16 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
if (architecture >= 0x50) {
dev_priv->card_type = NV_50;
- } else if (architecture >= 0x44) {
- dev_priv->card_type = NV_44;
} else if (architecture >= 0x40) {
- dev_priv->card_type = NV_40;
+ uint8_t subarch = architecture & 0xf;
+ /* Selection criteria borrowed from NV40EXA */
+ if (NV40_CHIPSET_MASK & (1 << subarch)) {
+ dev_priv->card_type = NV_40;
+ } else if (NV44_CHIPSET_MASK & (1 << subarch)) {
+ dev_priv->card_type = NV_44;
+ } else {
+ dev_priv->card_type = NV_UNKNOWN;
+ }
} else if (architecture >= 0x30) {
dev_priv->card_type = NV_30;
} else if (architecture >= 0x20) {
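
In these masks, bit n marks chipset 0x4n as a member of the family: 0x00000baf selects 0x40-0x43, 0x45, 0x47-0x49 and 0x4b as NV40-class, 0x00005450 selects 0x44, 0x46, 0x4a, 0x4c and 0x4e as NV44-class, and anything else (0x4d, 0x4f) falls through to NV_UNKNOWN. A standalone check, not driver code:

	#include <stdio.h>

	int main(void)
	{
		const unsigned nv40_mask = 0x00000baf, nv44_mask = 0x00005450;
		unsigned sub;

		for (sub = 0; sub < 16; sub++)
			printf("chipset 0x4%x -> %s\n", sub,
			       (nv40_mask >> sub) & 1 ? "NV_40" :
			       (nv44_mask >> sub) & 1 ? "NV_44" : "NV_UNKNOWN");
		return 0;
	}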
diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h
index 4d27f549..b0558389 100644
--- a/shared-core/r128_drv.h
+++ b/shared-core/r128_drv.h
@@ -118,7 +118,7 @@ typedef struct drm_r128_private {
drm_local_map_t *cce_ring;
drm_local_map_t *ring_rptr;
drm_local_map_t *agp_textures;
- struct ati_pcigart_info gart_info;
+ struct drm_ati_pcigart_info gart_info;
} drm_r128_private_t;
typedef struct drm_r128_buf_priv {
diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c
index 9ea98a84..30d38e13 100644
--- a/shared-core/radeon_cp.c
+++ b/shared-core/radeon_cp.c
@@ -1979,7 +1979,7 @@ void radeon_do_release(struct drm_device * dev)
schedule();
#else
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
- msleep(&ret, &dev->dev_lock, PZERO, "rdnrel",
+ mtx_sleep(&ret, &dev->dev_lock, PZERO, "rdnrel",
1);
#else
tsleep(&ret, PZERO, "rdnrel", 1);
diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h
index aea4c476..af39b61c 100644
--- a/shared-core/radeon_drv.h
+++ b/shared-core/radeon_drv.h
@@ -297,11 +297,11 @@ typedef struct drm_radeon_private {
int irq_enabled;
struct radeon_surface surfaces[RADEON_MAX_SURFACES];
- struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES];
+ struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
unsigned long pcigart_offset;
unsigned int pcigart_offset_set;
- struct ati_pcigart_info gart_info;
+ struct drm_ati_pcigart_info gart_info;
u32 scratch_ages[5];
@@ -401,7 +401,7 @@ extern void r300_init_reg_flags(struct drm_device *dev);
extern int r300_do_cp_cmdbuf(struct drm_device *dev,
struct drm_file *file_priv,
- drm_radeon_kcmd_buffer_t* cmdbuf);
+ drm_radeon_kcmd_buffer_t *cmdbuf);
/* Flags for stats.boxes
*/
diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c
index 1ece6399..2b2407ee 100644
--- a/shared-core/radeon_irq.c
+++ b/shared-core/radeon_irq.c
@@ -145,8 +145,7 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
}
static int radeon_driver_vblank_do_wait(struct drm_device * dev,
- unsigned int *sequence,
- int crtc)
+ unsigned int *sequence, int crtc)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c
index 4c85371e..623576ad 100644
--- a/shared-core/radeon_state.c
+++ b/shared-core/radeon_state.c
@@ -3157,7 +3157,7 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
-void radeon_driver_preclose(struct drm_device * dev,
+void radeon_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv)
{
if (dev->dev_private) {
@@ -3169,7 +3169,7 @@ void radeon_driver_preclose(struct drm_device * dev,
}
}
-void radeon_driver_lastclose(struct drm_device * dev)
+void radeon_driver_lastclose(struct drm_device *dev)
{
if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -3182,7 +3182,7 @@ void radeon_driver_lastclose(struct drm_device * dev)
radeon_do_release(dev);
}
-int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv)
+int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_driver_file_fields *radeon_priv;
@@ -3204,7 +3204,7 @@ int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv)
return 0;
}
-void radeon_driver_postclose(struct drm_device * dev, struct drm_file *file_priv)
+void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_radeon_driver_file_fields *radeon_priv =
file_priv->driver_priv;
diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c
index 7710ba0d..4b8a89fe 100644
--- a/shared-core/savage_bci.c
+++ b/shared-core/savage_bci.c
@@ -364,7 +364,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
unsigned int cur = dev_priv->current_dma_page;
unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
dev_priv->dma_pages[cur].used;
- unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) /
+ unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
SAVAGE_DMA_PAGE_SIZE;
uint32_t *dma_ptr;
unsigned int i;
@@ -374,7 +374,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
if (cur + nr_pages < dev_priv->nr_dma_pages) {
dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
- cur*SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
+ cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
if (n < rest)
rest = n;
dev_priv->dma_pages[cur].used += rest;
@@ -383,7 +383,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
} else {
dev_priv->dma_flush(dev_priv);
nr_pages =
- (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
+ (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
dev_priv->dma_pages[i].used = 0;
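
The recurring (x + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE shape being respaced in these hunks is integer ceiling division: the number of whole DMA pages needed to hold x entries. Later kernels spell it DIV_ROUND_UP(); as a sketch (the page size value is an assumption for the example):

	#define DIV_ROUND_UP(x, n)	(((x) + (n) - 1) / (n))

	/* with a 1024-entry page: DIV_ROUND_UP(1024, 1024) == 1,
	 *                         DIV_ROUND_UP(1025, 1024) == 2 */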
@@ -443,7 +443,7 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)
uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
dev_priv->dma_pages[cur].used += pad;
- while(pad != 0) {
+ while (pad != 0) {
*dma_ptr++ = BCI_CMD_WAIT;
pad--;
}
@@ -587,12 +587,12 @@ int savage_driver_firstopen(struct drm_device *dev)
dev_priv->mtrr[0].handle =
drm_mtrr_add(dev_priv->mtrr[0].base,
dev_priv->mtrr[0].size, DRM_MTRR_WC);
- dev_priv->mtrr[1].base = fb_base+0x02000000;
+ dev_priv->mtrr[1].base = fb_base + 0x02000000;
dev_priv->mtrr[1].size = 0x02000000;
dev_priv->mtrr[1].handle =
drm_mtrr_add(dev_priv->mtrr[1].base,
dev_priv->mtrr[1].size, DRM_MTRR_WC);
- dev_priv->mtrr[2].base = fb_base+0x04000000;
+ dev_priv->mtrr[2].base = fb_base + 0x04000000;
dev_priv->mtrr[2].size = 0x04000000;
dev_priv->mtrr[2].handle =
drm_mtrr_add(dev_priv->mtrr[2].base,
@@ -833,7 +833,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
depth_tile_format = SAVAGE_BD_TILE_DEST;
}
front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
- back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
+ back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
depth_stride =
dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
@@ -888,7 +888,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
return -ENOMEM;
}
- if (savage_dma_init(dev_priv) < 0) {
+ if (savage_dma_init(dev_priv) < 0) {
DRM_ERROR("could not initialize command DMA\n");
savage_do_cleanup_bci(dev);
return -ENOMEM;
@@ -983,7 +983,7 @@ static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_
* - event counter wrapped since the event was emitted or
* - the hardware has advanced up to or over the event to wait for.
*/
- if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
+ if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
return 0;
else
return dev_priv->wait_evnt(dev_priv, event_e);
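
A worked example of the wrap test above, where event_w/hw_w count wrap-arounds and event_e/hw_e are the 16-bit event values (numbers are illustrative):

	/* event (w=2, e=100)  vs  hw (w=3, e=5)  -> counter wrapped  -> return 0
	 * event (w=3, e=4)    vs  hw (w=3, e=5)  -> already passed   -> return 0
	 * event (w=3, e=9)    vs  hw (w=3, e=5)  -> still pending    -> wait_evnt()
	 */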
@@ -1065,8 +1065,6 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
if (!dma->buflist)
return;
- /*i830_flush_queue(dev);*/
-
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_savage_buf_priv_t *buf_priv = buf->dev_private;
diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c
index 61ec11cc..1c5a0e2e 100644
--- a/shared-core/savage_state.c
+++ b/shared-core/savage_state.c
@@ -30,23 +30,23 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
const struct drm_clip_rect *pbox)
{
uint32_t scstart = dev_priv->state.s3d.new_scstart;
- uint32_t scend = dev_priv->state.s3d.new_scend;
+ uint32_t scend = dev_priv->state.s3d.new_scend;
scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
((uint32_t)pbox->x1 & 0x000007ff) |
(((uint32_t)pbox->y1 << 16) & 0x07ff0000);
- scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
- (((uint32_t)pbox->x2-1) & 0x000007ff) |
- ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
+ scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
+ (((uint32_t)pbox->x2 - 1) & 0x000007ff) |
+ ((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000);
if (scstart != dev_priv->state.s3d.scstart ||
scend != dev_priv->state.s3d.scend) {
DMA_LOCALS;
BEGIN_DMA(4);
- DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
+ DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
DMA_WRITE(scstart);
DMA_WRITE(scend);
dev_priv->state.s3d.scstart = scstart;
- dev_priv->state.s3d.scend = scend;
+ dev_priv->state.s3d.scend = scend;
dev_priv->waiting = 1;
DMA_COMMIT();
}
@@ -61,13 +61,13 @@ void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
((uint32_t)pbox->x1 & 0x000007ff) |
(((uint32_t)pbox->y1 << 12) & 0x00fff000);
drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
- (((uint32_t)pbox->x2-1) & 0x000007ff) |
- ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
+ (((uint32_t)pbox->x2 - 1) & 0x000007ff) |
+ ((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000);
if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
drawctrl1 != dev_priv->state.s4.drawctrl1) {
DMA_LOCALS;
BEGIN_DMA(4);
- DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
+ DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
DMA_WRITE(drawctrl0);
DMA_WRITE(drawctrl1);
@@ -87,8 +87,8 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
}
if (!(addr & 1)) { /* local */
addr &= ~7;
- if (addr < dev_priv->texture_offset ||
- addr >= dev_priv->texture_offset+dev_priv->texture_size) {
+ if (addr < dev_priv->texture_offset ||
+ addr >= dev_priv->texture_offset + dev_priv->texture_size) {
DRM_ERROR
("bad texAddr%d %08x (local addr out of range)\n",
unit, addr);
@@ -114,10 +114,10 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
}
#define SAVE_STATE(reg,where) \
- if(start <= reg && start+count > reg) \
+ if(start <= reg && start + count > reg) \
dev_priv->state.where = regs[reg - start]
#define SAVE_STATE_MASK(reg,where,mask) do { \
- if(start <= reg && start+count > reg) { \
+ if(start <= reg && start + count > reg) { \
uint32_t tmp; \
tmp = regs[reg - start]; \
dev_priv->state.where = (tmp & (mask)) | \
@@ -129,9 +129,9 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
const uint32_t *regs)
{
if (start < SAVAGE_TEXPALADDR_S3D ||
- start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
+ start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
- start, start+count-1);
+ start, start + count - 1);
return -EINVAL;
}
@@ -142,7 +142,7 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
/* if any texture regs were changed ... */
if (start <= SAVAGE_TEXCTRL_S3D &&
- start+count > SAVAGE_TEXPALADDR_S3D) {
+ start + count > SAVAGE_TEXPALADDR_S3D) {
/* ... check texture state */
SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
@@ -161,9 +161,9 @@ static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
int ret = 0;
if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
- start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
+ start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
- start, start+count-1);
+ start, start + count - 1);
return -EINVAL;
}
@@ -212,14 +212,14 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
return ret;
/* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_SCSTART_S3D) {
- if (start+count > SAVAGE_SCEND_S3D+1)
- count2 = count - (SAVAGE_SCEND_S3D+1 - start);
- if (start+count > SAVAGE_SCSTART_S3D)
+ if (start + count > SAVAGE_SCEND_S3D + 1)
+ count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
+ if (start + count > SAVAGE_SCSTART_S3D)
count = SAVAGE_SCSTART_S3D - start;
} else if (start <= SAVAGE_SCEND_S3D) {
- if (start+count > SAVAGE_SCEND_S3D+1) {
- count -= SAVAGE_SCEND_S3D+1 - start;
- start = SAVAGE_SCEND_S3D+1;
+ if (start + count > SAVAGE_SCEND_S3D + 1) {
+ count -= SAVAGE_SCEND_S3D + 1 - start;
+ start = SAVAGE_SCEND_S3D + 1;
} else
return 0;
}
@@ -229,24 +229,24 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
return ret;
/* scissor regs are emitted in savage_dispatch_draw */
if (start < SAVAGE_DRAWCTRL0_S4) {
- if (start+count > SAVAGE_DRAWCTRL1_S4+1)
+ if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
count2 = count -
(SAVAGE_DRAWCTRL1_S4 + 1 - start);
- if (start+count > SAVAGE_DRAWCTRL0_S4)
+ if (start + count > SAVAGE_DRAWCTRL0_S4)
count = SAVAGE_DRAWCTRL0_S4 - start;
} else if (start <= SAVAGE_DRAWCTRL1_S4) {
- if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
- count -= SAVAGE_DRAWCTRL1_S4+1 - start;
- start = SAVAGE_DRAWCTRL1_S4+1;
+ if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
+ count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
+ start = SAVAGE_DRAWCTRL1_S4 + 1;
} else
return 0;
}
}
- bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
+ bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
if (cmd_header->state.global) {
- BEGIN_DMA(bci_size+1);
+ BEGIN_DMA(bci_size + 1);
DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
dev_priv->waiting = 1;
} else {
@@ -286,8 +286,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
BCI_LOCALS;
if (!dmabuf) {
- DRM_ERROR("called without dma buffers!\n");
- return -EINVAL;
+ DRM_ERROR("called without dma buffers!\n");
+ return -EINVAL;
}
if (!n)
@@ -337,9 +337,9 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
}
}
- if (start + n > dmabuf->total/32) {
+ if (start + n > dmabuf->total / 32) {
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
- start, start + n - 1, dmabuf->total/32);
+ start, start + n - 1, dmabuf->total / 32);
return -EINVAL;
}
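/* Side note: the dmabuf->total / 32 bound above implies -- an inference
 * from the code, not something the patch states -- that vertices occupy
 * fixed 32-byte slots in a DMA buffer.  The check, as a standalone sketch:
 */
#include <errno.h>

static int check_vertex_range(unsigned int start, unsigned int n,
			      unsigned int buf_bytes)
{
	unsigned int capacity = buf_bytes / 32; /* vertex slots available */

	return (start + n <= capacity) ? 0 : -EINVAL;
}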
@@ -374,33 +374,33 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
/* Need to reorder indices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
- int reorder[3] = {-1, -1, -1};
- reorder[start%3] = 2;
+ int reorder[3] = { -1, -1, -1 };
+ reorder[start % 3] = 2;
- BEGIN_BCI((count+1+1)/2);
- BCI_DRAW_INDICES_S3D(count, prim, start+2);
+ BEGIN_BCI((count + 1 + 1) / 2);
+ BCI_DRAW_INDICES_S3D(count, prim, start + 2);
- for (i = start+1; i+1 < start+count; i += 2)
+ for (i = start + 1; i + 1 < start + count; i += 2)
BCI_WRITE((i + reorder[i % 3]) |
((i + 1 +
reorder[(i + 1) % 3]) << 16));
- if (i < start+count)
- BCI_WRITE(i + reorder[i%3]);
+ if (i < start + count)
+ BCI_WRITE(i + reorder[i % 3]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- BEGIN_BCI((count+1+1)/2);
+ BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, start);
- for (i = start+1; i+1 < start+count; i += 2)
- BCI_WRITE(i | ((i+1) << 16));
- if (i < start+count)
+ for (i = start + 1; i + 1 < start + count; i += 2)
+ BCI_WRITE(i | ((i + 1) << 16));
+ if (i < start + count)
BCI_WRITE(i);
} else {
- BEGIN_BCI((count+2+1)/2);
+ BEGIN_BCI((count + 2 + 1) / 2);
BCI_DRAW_INDICES_S4(count, prim, skip);
- for (i = start; i+1 < start+count; i += 2)
- BCI_WRITE(i | ((i+1) << 16));
- if (i < start+count)
+ for (i = start; i + 1 < start + count; i += 2)
+ BCI_WRITE(i | ((i + 1) << 16));
+ if (i < start + count)
BCI_WRITE(i);
}
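/* Why the reorder[] trick above works (userspace illustration, not driver
 * code): for flat shading the Savage3D takes the flat colour from a
 * different triangle vertex than the client intended, so every triangle's
 * indices are rotated -- which moves the provoking vertex while keeping
 * the winding, and hence the culling, sense.  With start % 3 == 0 the
 * table is {2, -1, -1}:
 */
#include <stdio.h>

int main(void)
{
	const int reorder[3] = { 2, -1, -1 }; /* start % 3 == 0 case */
	int i;

	for (i = 0; i < 6; ++i)
		printf("%d -> %d\n", i, i + reorder[i % 3]);
	/* prints 0->2 1->0 2->1  3->5 4->3 5->4, i.e. (0,1,2) becomes
	 * (2,0,1) and (3,4,5) becomes (5,3,4) -- pure rotations. */
	return 0;
}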
@@ -479,9 +479,9 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
return -EINVAL;
}
- if (start + n > vb_size / (vb_stride*4)) {
+ if (start + n > vb_size / (vb_stride * 4)) {
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
- start, start + n - 1, vb_size / (vb_stride*4));
+ start, start + n - 1, vb_size / (vb_stride * 4));
return -EINVAL;
}
@@ -493,28 +493,28 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
/* Need to reorder vertices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
- int reorder[3] = {-1, -1, -1};
- reorder[start%3] = 2;
+ int reorder[3] = { -1, -1, -1 };
+ reorder[start % 3] = 2;
- BEGIN_DMA(count*vtx_size+1);
+ BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
- for (i = start; i < start+count; ++i) {
+ for (i = start; i < start + count; ++i) {
unsigned int j = i + reorder[i % 3];
- DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
+ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
}
DMA_COMMIT();
} else {
- BEGIN_DMA(count*vtx_size+1);
+ BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
if (vb_stride == vtx_size) {
- DMA_COPY(&vtxbuf[vb_stride*start],
- vtx_size*count);
+ DMA_COPY(&vtxbuf[vb_stride * start],
+ vtx_size * count);
} else {
- for (i = start; i < start+count; ++i) {
- DMA_COPY(&vtxbuf[vb_stride*i],
+ for (i = start; i < start + count; ++i) {
+ DMA_COPY(&vtxbuf[vb_stride * i],
vtx_size);
}
}
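/* The stride test above is a packed-data fast path: when vb_stride equals
 * vtx_size the count vertices sit back to back, so one copy replaces
 * count small ones.  Standalone sketch with memcpy standing in for
 * DMA_COPY, which copies 32-bit words (names here are illustrative):
 */
#include <stdint.h>
#include <string.h>

static void copy_vertices(uint32_t *dst, const uint32_t *vtxbuf,
			  unsigned int start, unsigned int count,
			  unsigned int vb_stride, unsigned int vtx_size)
{
	unsigned int i;

	if (vb_stride == vtx_size) {
		/* tightly packed: one contiguous run of dwords */
		memcpy(dst, &vtxbuf[vb_stride * start],
		       (size_t)vtx_size * count * 4);
	} else {
		for (i = start; i < start + count; ++i) {
			memcpy(dst, &vtxbuf[vb_stride * i],
			       (size_t)vtx_size * 4);
			dst += vtx_size;
		}
	}
}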
@@ -544,8 +544,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
BCI_LOCALS;
if (!dmabuf) {
- DRM_ERROR("called without dma buffers!\n");
- return -EINVAL;
+ DRM_ERROR("called without dma buffers!\n");
+ return -EINVAL;
}
if (!n)
@@ -623,9 +623,9 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
/* check indices */
for (i = 0; i < count; ++i) {
- if (idx[i] > dmabuf->total/32) {
+ if (idx[i] > dmabuf->total / 32) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
- i, idx[i], dmabuf->total/32);
+ i, idx[i], dmabuf->total / 32);
return -EINVAL;
}
}
@@ -634,31 +634,31 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
/* Need to reorder indices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
- int reorder[3] = {2, -1, -1};
+ int reorder[3] = { 2, -1, -1 };
- BEGIN_BCI((count+1+1)/2);
+ BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
- for (i = 1; i+1 < count; i += 2)
+ for (i = 1; i + 1 < count; i += 2)
BCI_WRITE(idx[i + reorder[i % 3]] |
(idx[i + 1 +
reorder[(i + 1) % 3]] << 16));
if (i < count)
- BCI_WRITE(idx[i + reorder[i%3]]);
+ BCI_WRITE(idx[i + reorder[i % 3]]);
} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- BEGIN_BCI((count+1+1)/2);
+ BEGIN_BCI((count + 1 + 1) / 2);
BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
- for (i = 1; i+1 < count; i += 2)
- BCI_WRITE(idx[i] | (idx[i+1] << 16));
+ for (i = 1; i + 1 < count; i += 2)
+ BCI_WRITE(idx[i] | (idx[i + 1] << 16));
if (i < count)
BCI_WRITE(idx[i]);
} else {
- BEGIN_BCI((count+2+1)/2);
+ BEGIN_BCI((count + 2 + 1) / 2);
BCI_DRAW_INDICES_S4(count, prim, skip);
- for (i = 0; i+1 < count; i += 2)
- BCI_WRITE(idx[i] | (idx[i+1] << 16));
+ for (i = 0; i + 1 < count; i += 2)
+ BCI_WRITE(idx[i] | (idx[i + 1] << 16));
if (i < count)
BCI_WRITE(idx[i]);
}
@@ -743,9 +743,9 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
/* Check indices */
for (i = 0; i < count; ++i) {
- if (idx[i] > vb_size / (vb_stride*4)) {
+ if (idx[i] > vb_size / (vb_stride * 4)) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
- i, idx[i], vb_size / (vb_stride*4));
+ i, idx[i], vb_size / (vb_stride * 4));
return -EINVAL;
}
}
@@ -754,24 +754,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
/* Need to reorder vertices for correct flat
* shading while preserving the clock sense
* for correct culling. Only on Savage3D. */
- int reorder[3] = {2, -1, -1};
+ int reorder[3] = { 2, -1, -1 };
- BEGIN_DMA(count*vtx_size+1);
+ BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = 0; i < count; ++i) {
unsigned int j = idx[i + reorder[i % 3]];
- DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
+ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
}
DMA_COMMIT();
} else {
- BEGIN_DMA(count*vtx_size+1);
+ BEGIN_DMA(count * vtx_size + 1);
DMA_DRAW_PRIMITIVE(count, prim, skip);
for (i = 0; i < count; ++i) {
unsigned int j = idx[i];
- DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
+ DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
}
DMA_COMMIT();
@@ -823,12 +823,12 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
x = boxes[i].x1, y = boxes[i].y1;
w = boxes[i].x2 - boxes[i].x1;
h = boxes[i].y2 - boxes[i].y1;
- BEGIN_DMA(nbufs*6);
+ BEGIN_DMA(nbufs * 6);
for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
if (!(flags & buf))
continue;
DMA_WRITE(clear_cmd);
- switch(buf) {
+ switch (buf) {
case SAVAGE_FRONT:
DMA_WRITE(dev_priv->front_offset);
DMA_WRITE(dev_priv->front_bd);
@@ -880,8 +880,8 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
DMA_WRITE(dev_priv->back_bd);
DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
- DMA_WRITE(BCI_W_H(boxes[i].x2-boxes[i].x1,
- boxes[i].y2-boxes[i].y1));
+ DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
+ boxes[i].y2 - boxes[i].y1));
DMA_COMMIT();
}
@@ -973,7 +973,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
if (cmdbuf->dma_idx > dma->buf_count) {
DRM_ERROR
("vertex buffer index %u out of range (0-%u)\n",
- cmdbuf->dma_idx, dma->buf_count-1);
+ cmdbuf->dma_idx, dma->buf_count - 1);
return -EINVAL;
}
dmabuf = dma->buflist[cmdbuf->dma_idx];
@@ -1064,15 +1064,15 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
case SAVAGE_CMD_DMA_PRIM:
case SAVAGE_CMD_VB_PRIM:
if (!first_draw_cmd)
- first_draw_cmd = cmdbuf->cmd_addr-1;
+ first_draw_cmd = cmdbuf->cmd_addr - 1;
cmdbuf->cmd_addr += j;
i += j;
break;
default:
if (first_draw_cmd) {
- ret = savage_dispatch_draw (
+ ret = savage_dispatch_draw(
dev_priv, first_draw_cmd,
- cmdbuf->cmd_addr-1,
+ cmdbuf->cmd_addr - 1,
dmabuf, cmdbuf->vb_addr,
cmdbuf->vb_size,
cmdbuf->vb_stride,
@@ -1134,7 +1134,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
}
if (first_draw_cmd) {
- ret = savage_dispatch_draw (
+ ret = savage_dispatch_draw(
dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
cmdbuf->nbox, cmdbuf->box_addr);
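/* The tail flush above mirrors how the command loop batches draws: a run
 * of consecutive draw commands is remembered by its first element and
 * dispatched in one go when a non-draw command (or the end of the buffer)
 * is reached, so the whole run can be replayed per cliprect.  A
 * self-contained sketch of that batching shape (names are illustrative):
 */
#include <stddef.h>

enum cmd_kind { CMD_DRAW, CMD_OTHER };

static void flush_batch(const enum cmd_kind *first, const enum cmd_kind *end)
{
	/* stands in for savage_dispatch_draw(first .. end) */
	(void)first; (void)end;
}

static void run_cmdbuf(const enum cmd_kind *cmd, size_t n)
{
	const enum cmd_kind *first_draw_cmd = NULL;
	size_t i;

	for (i = 0; i < n; ++i, ++cmd) {
		if (*cmd == CMD_DRAW) {
			if (!first_draw_cmd)
				first_draw_cmd = cmd; /* open a batch */
		} else {
			if (first_draw_cmd) {
				flush_batch(first_draw_cmd, cmd);
				first_draw_cmd = NULL;
			}
			/* handle the non-draw command here */
		}
	}
	if (first_draw_cmd)		/* tail batch, as in the code above */
		flush_batch(first_draw_cmd, cmd);
}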
diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h
index a4a88fe1..db532f3c 100644
--- a/shared-core/sis_drv.h
+++ b/shared-core/sis_drv.h
@@ -84,8 +84,6 @@ extern int sis_final_context(struct drm_device * dev, int context);
#endif
-
-
extern struct drm_ioctl_desc sis_ioctls[];
extern int sis_max_ioctl;
diff --git a/shared-core/via_3d_reg.h b/shared-core/via_3d_reg.h
index cf61bb51..462375d5 100644
--- a/shared-core/via_3d_reg.h
+++ b/shared-core/via_3d_reg.h
@@ -1643,7 +1643,6 @@
#define HC_HAGPBpID_STOP 0x00000002
#define HC_HAGPBpH_MASK 0x00ffffff
-
#define VIA_VIDEO_HEADER5 0xFE040000
#define VIA_VIDEO_HEADER6 0xFE050000
#define VIA_VIDEO_HEADER7 0xFE060000
diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c
index 89753aa6..ab39f53c 100644
--- a/shared-core/via_dma.c
+++ b/shared-core/via_dma.c
@@ -54,11 +54,11 @@
*vb++ = (w2); \
dev_priv->dma_low += 8;
-static void via_cmdbuf_start(drm_via_private_t * dev_priv);
-static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
-static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
-static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
-static int via_wait_idle(drm_via_private_t * dev_priv);
+static void via_cmdbuf_start(drm_via_private_t *dev_priv);
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
+static int via_wait_idle(drm_via_private_t *dev_priv);
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
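/* The two assignment lines at the top of the hunk above are the tail of
 * the VIA_OUT_RING_QW macro.  Reconstructing from that context (the w1
 * line is inferred, not shown in this diff), it emits one quadword --
 * two 32-bit words -- into the mapped ring and advances the write offset
 * by 8 bytes, roughly:
 *
 *	#define VIA_OUT_RING_QW(w1, w2)	\
 *		*vb++ = (w1);		\
 *		*vb++ = (w2);		\
 *		dev_priv->dma_low += 8;
 */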
@@ -110,7 +110,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
if (count-- == 0) {
DRM_ERROR
("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
- hw_addr, cur_addr, next_addr);
+ hw_addr, cur_addr, next_addr);
return -1;
}
} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
@@ -450,7 +450,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
-static int via_wait_idle(drm_via_private_t * dev_priv)
+static int via_wait_idle(drm_via_private_t *dev_priv)
{
int count = 10000000;
@@ -462,7 +462,7 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
return count;
}
-static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
+static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
uint32_t addr, uint32_t *cmd_addr_hi,
uint32_t *cmd_addr_lo, int skip_wait)
{
@@ -472,11 +472,12 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
uint32_t qw_pad_count;
if (!skip_wait)
- via_cmdbuf_wait(dev_priv, 2*CMDBUF_ALIGNMENT_SIZE);
+ via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
vb = via_get_dma(dev_priv);
- VIA_OUT_RING_QW( HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
- (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
+ VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
+ (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
+
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
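/* The qw_pad_count arithmetic above computes, in quadwords, the distance
 * from the current write position to the next command-buffer alignment
 * boundary.  Worked example under the assumption that
 * CMDBUF_ALIGNMENT_SIZE is a power of two with
 * CMDBUF_ALIGNMENT_MASK == CMDBUF_ALIGNMENT_SIZE - 1 (values below are
 * illustrative):
 *
 *	size = 0x100, mask = 0xff, dma_low & mask = 0x28
 *	qw_pad_count = (0x100 >> 3) - (0x28 >> 3) = 32 - 5 = 27
 *
 * i.e. 27 quadwords (216 bytes) bring dma_low from ...0x28 up to the
 * next 0x100 boundary.
 */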
@@ -557,8 +558,8 @@ static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
via_cmdbuf_wait(dev_priv, qwords + 2);
vb = via_get_dma(dev_priv);
- VIA_OUT_RING_QW( HC_HEADER2, HC_ParaType_NotTex << 16);
- via_align_buffer(dev_priv,vb,qwords);
+ VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
+ via_align_buffer(dev_priv, vb, qwords);
}
static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
@@ -577,7 +578,7 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
volatile uint32_t *last_pause_ptr;
agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
+ via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
&jump_addr_lo, 0);
dev_priv->dma_wrap = dev_priv->dma_low;
@@ -594,16 +595,15 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
via_dummy_bitblt(dev_priv);
via_dummy_bitblt(dev_priv);
- last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0) -1;
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+ via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
&pause_addr_lo, 0);
*last_pause_ptr = pause_addr_lo;
- via_hook_segment( dev_priv, jump_addr_hi, jump_addr_lo, 0);
+ via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
}
-
static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
{
via_cmdbuf_jump(dev_priv);
@@ -614,7 +614,7 @@ static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
uint32_t pause_addr_lo, pause_addr_hi;
via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
- via_hook_segment( dev_priv, pause_addr_hi, pause_addr_lo, 0);
+ via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
@@ -653,7 +653,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
count = 1000000;
tmp_size = d_siz->size;
- switch(d_siz->func) {
+ switch (d_siz->func) {
case VIA_CMDBUF_SPACE:
while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
&& count--) {
diff --git a/shared-core/via_drm.h b/shared-core/via_drm.h
index 7ee69d28..e6a8ec64 100644
--- a/shared-core/via_drm.h
+++ b/shared-core/via_drm.h
@@ -228,7 +228,7 @@ typedef enum {
#define VIA_IRQ_FLAGS_MASK 0xF0000000
-enum drm_via_irqs{
+enum drm_via_irqs {
drm_via_irq_hqv0 = 0,
drm_via_irq_hqv1,
drm_via_irq_dma0_dd,
@@ -238,7 +238,7 @@ enum drm_via_irqs{
drm_via_irq_num
};
-struct drm_via_wait_irq_request{
+struct drm_via_wait_irq_request {
unsigned irq;
via_irq_seq_type_t type;
uint32_t sequence;
@@ -270,9 +270,9 @@ typedef struct drm_via_dmablit {
uint32_t fb_stride;
unsigned char *mem_addr;
- uint32_t mem_stride;
+ uint32_t mem_stride;
- uint32_t flags;
+ uint32_t flags;
int to_fb;
drm_via_blitsync_t sync;
diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c
index df91ab00..ec3f50bd 100644
--- a/shared-core/via_irq.c
+++ b/shared-core/via_irq.c
@@ -76,8 +76,7 @@ static maskarray_t via_pro_group_a_irqs[] = {
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
-static int via_num_pro_group_a =
- sizeof(via_pro_group_a_irqs)/sizeof(maskarray_t);
+static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
static maskarray_t via_unichrome_irqs[] = {
@@ -86,15 +85,15 @@ static maskarray_t via_unichrome_irqs[] = {
{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
};
-static int via_num_unichrome = sizeof(via_unichrome_irqs)/sizeof(maskarray_t);
+static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
static unsigned time_diff(struct timeval *now,struct timeval *then)
{
- return (now->tv_usec >= then->tv_usec) ?
- now->tv_usec - then->tv_usec :
- 1000000 - (then->tv_usec - now->tv_usec);
+ return (now->tv_usec >= then->tv_usec) ?
+ now->tv_usec - then->tv_usec :
+ 1000000 - (then->tv_usec - now->tv_usec);
}
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
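/* Two notes on the hunks above.  ARRAY_SIZE is the standard kernel helper
 * for element counts; its classic definition is
 *
 *	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
 *
 * which computes the same value as the open-coded sizeof division it
 * replaces but survives a change of element type.  And the reindented
 * time_diff() returns the microsecond delta between two timevals on the
 * assumption that less than a second has elapsed; its second branch
 * handles tv_usec wrapping past a second boundary.
 */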
@@ -126,17 +125,17 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
}
if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
DRM_DEBUG("US per vblank is: %u\n",
- dev_priv->usec_per_vblank);
+ dev_priv->usec_per_vblank);
}
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
handled = 1;
}
- for (i=0; i<dev_priv->num_irqs; ++i) {
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
if (status & cur_irq->pending_mask) {
- atomic_inc( &cur_irq->irq_received );
- DRM_WAKEUP( &cur_irq->irq_queue );
+ atomic_inc(&cur_irq->irq_received);
+ DRM_WAKEUP(&cur_irq->irq_queue);
handled = 1;
#ifdef VIA_HAVE_DMABLIT
if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
@@ -216,7 +215,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
return -EINVAL;
}
- if (irq >= drm_via_irq_num ) {
+ if (irq >= drm_via_irq_num) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
irq);
return -EINVAL;
@@ -278,11 +277,11 @@ void via_driver_irq_preinstall(struct drm_device * dev)
dev_priv->irq_map = via_irqmap_unichrome;
}
- for(i=0; i < dev_priv->num_irqs; ++i) {
+ for (i = 0; i < dev_priv->num_irqs; ++i) {
atomic_set(&cur_irq->irq_received, 0);
cur_irq->enable_mask = dev_priv->irq_masks[i][0];
cur_irq->pending_mask = dev_priv->irq_masks[i][1];
- DRM_INIT_WAITQUEUE( &cur_irq->irq_queue );
+ DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
dev_priv->irq_enable_mask |= cur_irq->enable_mask;
dev_priv->irq_pending_mask |= cur_irq->pending_mask;
cur_irq++;
diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c
index cfacd0ca..d2b69f74 100644
--- a/shared-core/via_verifier.c
+++ b/shared-core/via_verifier.c
@@ -249,10 +249,10 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
* Partially stolen from drm_memory.h
*/
-static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
+static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
unsigned long offset,
unsigned long size,
- struct drm_device * dev)
+ struct drm_device *dev)
{
#ifdef __linux__
struct drm_map_list *r_list;
diff --git a/shared-core/via_verifier.h b/shared-core/via_verifier.h
index c50a8130..abdaa653 100644
--- a/shared-core/via_verifier.h
+++ b/shared-core/via_verifier.h
@@ -33,8 +33,6 @@ typedef enum {
tex_address
} drm_via_sequence_t;
-
-
typedef struct {
unsigned texture;
uint32_t z_addr;
@@ -45,7 +43,7 @@ typedef struct {
uint32_t tex_level_lo[2];
uint32_t tex_level_hi[2];
uint32_t tex_palette_size[2];
- uint32_t tex_npot[2];
+ uint32_t tex_npot[2];
drm_via_sequence_t unfinished;
int agp_texture;
int multitex;
@@ -56,9 +54,9 @@ typedef struct {
const uint32_t *buf_start;
} drm_via_state_t;
-extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
+extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
struct drm_device *dev, int agp);
-extern int via_parse_command_stream(struct drm_device *dev, const uint32_t * buf,
+extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
unsigned int size);
#endif