Diffstat (limited to 'shared-core')
-rw-r--r--   shared-core/drm.h            |   4
-rw-r--r--   shared-core/i915_dma.c       |  45
-rw-r--r--   shared-core/i915_drm.h       |   2
-rw-r--r--   shared-core/mga_dma.c        |  85
-rw-r--r--   shared-core/mga_irq.c        |   9
-rw-r--r--   shared-core/mga_state.c      |  12
-rw-r--r--   shared-core/r128_drv.h       |   2
-rw-r--r--   shared-core/radeon_drv.h     |   6
-rw-r--r--   shared-core/radeon_irq.c     |   3
-rw-r--r--   shared-core/radeon_state.c   |   8
-rw-r--r--   shared-core/savage_bci.c     |  20
-rw-r--r--   shared-core/savage_state.c   | 184
-rw-r--r--   shared-core/sis_drv.h        |   2
-rw-r--r--   shared-core/via_3d_reg.h     |   1
-rw-r--r--   shared-core/via_dma.c        |  40
-rw-r--r--   shared-core/via_drm.h        |   8
-rw-r--r--   shared-core/via_irq.c        |  25
-rw-r--r--   shared-core/via_verifier.c   |   4
-rw-r--r--   shared-core/via_verifier.h   |   8
19 files changed, 226 insertions(+), 242 deletions(-)
diff --git a/shared-core/drm.h b/shared-core/drm.h index 39414902..636c1217 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -646,7 +646,7 @@ struct drm_fence_arg { unsigned int signaled; unsigned int error; unsigned int sequence; - unsigned int pad64; + unsigned int pad64; uint64_t expand_pad[2]; /*Future expansion */ }; @@ -878,7 +878,7 @@ struct drm_bo_version_arg { struct drm_mm_type_arg { unsigned int mem_type; - unsigned int lock_flags; + unsigned int lock_flags; }; struct drm_mm_init_arg { diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 8ce47e36..9aff752c 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -416,7 +416,7 @@ static int i915_emit_box(struct drm_device * dev, } /* XXX: Emitting the counter should really be moved to part of the IRQ - * emit. For now, do it in both places: + * emit. For now, do it in both places: */ void i915_emit_breadcrumb(struct drm_device *dev) @@ -1181,7 +1181,8 @@ static int i915_setparam(struct drm_device *dev, void *data, switch (param->param) { case I915_SETPARAM_USE_MI_BATCHBUFFER_START: - dev_priv->use_mi_batchbuffer_start = param->value; + if (!IS_I965G(dev)) + dev_priv->use_mi_batchbuffer_start = param->value; break; case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: dev_priv->tex_lru_log_granularity = param->value; @@ -1229,27 +1230,27 @@ static int i915_mmio(struct drm_device *dev, void *data, base = (u8 *) dev_priv->mmio_map->handle + e->offset; switch (mmio->read_write) { - case I915_MMIO_READ: - if (!(e->flag & I915_MMIO_MAY_READ)) - return -EINVAL; - for (i = 0; i < e->size / 4; i++) - buf[i] = I915_READ(e->offset + i * 4); - if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) { - DRM_ERROR("DRM_COPY_TO_USER failed\n"); - return -EFAULT; - } - break; - - case I915_MMIO_WRITE: - if (!(e->flag & I915_MMIO_MAY_WRITE)) - return -EINVAL; - if(DRM_COPY_FROM_USER(buf, mmio->data, e->size)) { - DRM_ERROR("DRM_COPY_TO_USER failed\n"); + case I915_MMIO_READ: + if (!(e->flag & I915_MMIO_MAY_READ)) + return -EINVAL; + for (i = 0; i < e->size / 4; i++) + buf[i] = I915_READ(e->offset + i * 4); + if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) { + DRM_ERROR("DRM_COPY_TO_USER failed\n"); + return -EFAULT; + } + break; + + case I915_MMIO_WRITE: + if (!(e->flag & I915_MMIO_MAY_WRITE)) + return -EINVAL; + if(DRM_COPY_FROM_USER(buf, mmio->data, e->size)) { + DRM_ERROR("DRM_COPY_TO_USER failed\n"); return -EFAULT; - } - for (i = 0; i < e->size / 4; i++) - I915_WRITE(e->offset + i * 4, buf[i]); - break; + } + for (i = 0; i < e->size / 4; i++) + I915_WRITE(e->offset + i * 4, buf[i]); + break; } return 0; } diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index 11dd6c62..8a3be4e0 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -314,7 +314,7 @@ typedef struct drm_i915_mmio_entry { unsigned int flag; unsigned int offset; unsigned int size; -}drm_i915_mmio_entry_t; +} drm_i915_mmio_entry_t; typedef struct drm_i915_mmio { unsigned int read_write:1; diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index 44b14945..67236b2d 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -46,7 +46,7 @@ #define MINIMAL_CLEANUP 0 #define FULL_CLEANUP 1 -static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup); +static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup); /* ================================================================ * Engine control @@ -395,7 +395,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) int mga_driver_load(struct 
drm_device *dev, unsigned long flags) { - drm_mga_private_t * dev_priv; + drm_mga_private_t *dev_priv; dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER); if (!dev_priv) @@ -436,10 +436,11 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags) static int mga_do_agp_dma_bootstrap(struct drm_device *dev, drm_mga_dma_bootstrap_t * dma_bs) { - drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; + drm_mga_private_t *const dev_priv = + (drm_mga_private_t *)dev->dev_private; unsigned int warp_size = mga_warp_microcode_size(dev_priv); int err; - unsigned offset; + unsigned offset; const unsigned secondary_size = dma_bs->secondary_bin_count * dma_bs->secondary_bin_size; const unsigned agp_size = (dma_bs->agp_size << 20); @@ -481,11 +482,10 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, } } - /* Allocate and bind AGP memory. */ agp_req.size = agp_size; agp_req.type = 0; - err = drm_agp_alloc( dev, & agp_req ); + err = drm_agp_alloc(dev, &agp_req); if (err) { dev_priv->agp_size = 0; DRM_ERROR("Unable to allocate %uMB AGP memory\n", @@ -511,36 +511,36 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, warp_size = PAGE_SIZE; offset = 0; - err = drm_addmap( dev, offset, warp_size, - _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp ); + err = drm_addmap(dev, offset, warp_size, + _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp); if (err) { DRM_ERROR("Unable to map WARP microcode: %d\n", err); return err; } offset += warp_size; - err = drm_addmap( dev, offset, dma_bs->primary_size, - _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary ); + err = drm_addmap(dev, offset, dma_bs->primary_size, + _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary); if (err) { DRM_ERROR("Unable to map primary DMA region: %d\n", err); return err; } offset += dma_bs->primary_size; - err = drm_addmap( dev, offset, secondary_size, - _DRM_AGP, 0, & dev->agp_buffer_map ); + err = drm_addmap(dev, offset, secondary_size, + _DRM_AGP, 0, & dev->agp_buffer_map); if (err) { DRM_ERROR("Unable to map secondary DMA region: %d\n", err); return err; } - (void) memset( &req, 0, sizeof(req) ); + (void)memset( &req, 0, sizeof(req) ); req.count = dma_bs->secondary_bin_count; req.size = dma_bs->secondary_bin_size; req.flags = _DRM_AGP_BUFFER; req.agp_start = offset; - err = drm_addbufs_agp( dev, & req ); + err = drm_addbufs_agp(dev, &req); if (err) { DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err); return err; @@ -563,8 +563,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, #endif offset += secondary_size; - err = drm_addmap( dev, offset, agp_size - offset, - _DRM_AGP, 0, & dev_priv->agp_textures ); + err = drm_addmap(dev, offset, agp_size - offset, + _DRM_AGP, 0, & dev_priv->agp_textures); if (err) { DRM_ERROR("Unable to map AGP texture region: %d\n", err); return err; @@ -606,7 +606,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, static int mga_do_pci_dma_bootstrap(struct drm_device * dev, drm_mga_dma_bootstrap_t * dma_bs) { - drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; + drm_mga_private_t *const dev_priv = + (drm_mga_private_t *) dev->dev_private; unsigned int warp_size = mga_warp_microcode_size(dev_priv); unsigned int primary_size; unsigned int bin_count; @@ -639,9 +640,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev, * alignment of the primary or secondary DMA buffers. 
*/ - for ( primary_size = dma_bs->primary_size - ; primary_size != 0 - ; primary_size >>= 1 ) { + for (primary_size = dma_bs->primary_size; primary_size != 0; + primary_size >>= 1 ) { /* The proper alignment for this mapping is 0x04 */ err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT, _DRM_READ_ONLY, &dev_priv->primary); @@ -657,18 +657,17 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev, if (dev_priv->primary->size != dma_bs->primary_size) { DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n", dma_bs->primary_size, - (unsigned) dev_priv->primary->size); + (unsigned)dev_priv->primary->size); dma_bs->primary_size = dev_priv->primary->size; } - for ( bin_count = dma_bs->secondary_bin_count - ; bin_count > 0 - ; bin_count-- ) { - (void) memset( &req, 0, sizeof(req) ); + for (bin_count = dma_bs->secondary_bin_count; bin_count > 0; + bin_count-- ) { + (void)memset(&req, 0, sizeof(req)); req.count = bin_count; req.size = dma_bs->secondary_bin_size; - err = drm_addbufs_pci( dev, & req ); + err = drm_addbufs_pci(dev, &req); if (!err) { break; } @@ -696,12 +695,12 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev, } -static int mga_do_dma_bootstrap(struct drm_device * dev, - drm_mga_dma_bootstrap_t * dma_bs) +static int mga_do_dma_bootstrap(struct drm_device *dev, + drm_mga_dma_bootstrap_t *dma_bs) { const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); int err; - drm_mga_private_t * const dev_priv = + drm_mga_private_t *const dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -710,17 +709,17 @@ static int mga_do_dma_bootstrap(struct drm_device * dev, /* The first steps are the same for both PCI and AGP based DMA. Map * the cards MMIO registers and map a status page. */ - err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size, - _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio ); + err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size, + _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio); if (err) { DRM_ERROR("Unable to map MMIO region: %d\n", err); return err; } - err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM, - _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, - & dev_priv->status ); + err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, + _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, + & dev_priv->status); if (err) { DRM_ERROR("Unable to map status region: %d\n", err); return err; @@ -768,7 +767,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data, drm_mga_dma_bootstrap_t *bootstrap = data; int err; static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; - const drm_mga_private_t * const dev_priv = + const drm_mga_private_t *const dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -829,7 +828,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) return -EINVAL; } - if (! dev_priv->used_new_dma_init) { + if (!dev_priv->used_new_dma_init) { dev_priv->dma_access = MGA_PAGPXFER; dev_priv->wagp_enable = MGA_WAGP_ENABLE; @@ -855,7 +854,8 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) return -EINVAL; } dev->agp_buffer_token = init->buffers_offset; - dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); + dev->agp_buffer_map = + drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("failed to find dma buffer region!\n"); return -EINVAL; @@ -898,10 +898,6 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) /* Init the primary DMA registers. 
*/ MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL); -#if 0 - MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */ - MGA_PRIMPTREN1); /* DWGSYNC */ -#endif dev_priv->prim.start = (u8 *) dev_priv->primary->handle; dev_priv->prim.end = ((u8 *) dev_priv->primary->handle @@ -932,7 +928,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) return 0; } -static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup) +static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup) { int err = 0; DRM_DEBUG("\n"); @@ -993,7 +989,8 @@ static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup) memset(&dev_priv->prim, 0, sizeof(dev_priv->prim)); dev_priv->warp_pipe = 0; - memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); + memset(dev_priv->warp_pipe_phys, 0, + sizeof(dev_priv->warp_pipe_phys)); if (dev_priv->head != NULL) { mga_freelist_cleanup(dev); @@ -1015,7 +1012,7 @@ int mga_dma_init(struct drm_device *dev, void *data, case MGA_INIT_DMA: err = mga_do_init_dma(dev, init); if (err) { - (void) mga_do_cleanup_dma(dev, FULL_CLEANUP); + (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); } return err; case MGA_CLEANUP_DMA: diff --git a/shared-core/mga_irq.c b/shared-core/mga_irq.c index c18bae9f..0f83577f 100644 --- a/shared-core/mga_irq.c +++ b/shared-core/mga_irq.c @@ -57,7 +57,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) /* SOFTRAP interrupt */ if (status & MGA_SOFTRAPEN) { const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); - const u32 prim_end = MGA_READ(MGA_PRIMEND); + const u32 prim_end = MGA_READ(MGA_PRIMEND); MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); @@ -65,7 +65,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) /* In addition to clearing the interrupt-pending bit, we * have to write to MGA_PRIMEND to re-start the DMA operation. */ - if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) { + if ((prim_start & ~0x03) != (prim_end & ~0x03)) { MGA_WRITE(MGA_PRIMEND, prim_end); } @@ -74,9 +74,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) handled = 1; } - if ( handled ) { + if (handled) return IRQ_HANDLED; - } return IRQ_NONE; } @@ -131,7 +130,7 @@ void mga_driver_irq_postinstall(struct drm_device * dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; - DRM_INIT_WAITQUEUE( &dev_priv->fence_queue ); + DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); /* Turn on vertical blank interrupt and soft trap interrupt. 
*/ MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c index 0b8ba048..d9cc9d7e 100644 --- a/shared-core/mga_state.c +++ b/shared-core/mga_state.c @@ -62,8 +62,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv, } DMA_BLOCK(MGA_DMAPAD, 0x00000000, MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1, - MGA_YTOP, box->y1 * pitch, - MGA_YBOT, (box->y2 - 1) * pitch); + MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch); ADVANCE_DMA(); } @@ -78,18 +77,15 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv) DMA_BLOCK(MGA_DSTORG, ctx->dstorg, MGA_MACCESS, ctx->maccess, - MGA_PLNWT, ctx->plnwt, - MGA_DWGCTL, ctx->dwgctl); + MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl); DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl, MGA_FOGCOL, ctx->fogcolor, - MGA_WFLAG, ctx->wflag, - MGA_ZORG, dev_priv->depth_offset); + MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset); DMA_BLOCK(MGA_FCOL, ctx->fcol, MGA_DMAPAD, 0x00000000, - MGA_DMAPAD, 0x00000000, - MGA_DMAPAD, 0x00000000); + MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000); ADVANCE_DMA(); } diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h index 4d27f549..b0558389 100644 --- a/shared-core/r128_drv.h +++ b/shared-core/r128_drv.h @@ -118,7 +118,7 @@ typedef struct drm_r128_private { drm_local_map_t *cce_ring; drm_local_map_t *ring_rptr; drm_local_map_t *agp_textures; - struct ati_pcigart_info gart_info; + struct drm_ati_pcigart_info gart_info; } drm_r128_private_t; typedef struct drm_r128_buf_priv { diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 18a297ca..fcbdc2e2 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -291,11 +291,11 @@ typedef struct drm_radeon_private { int irq_enabled; struct radeon_surface surfaces[RADEON_MAX_SURFACES]; - struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES]; + struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES]; unsigned long pcigart_offset; unsigned int pcigart_offset_set; - struct ati_pcigart_info gart_info; + struct drm_ati_pcigart_info gart_info; u32 scratch_ages[5]; @@ -395,7 +395,7 @@ extern void r300_init_reg_flags(void); extern int r300_do_cp_cmdbuf(struct drm_device *dev, struct drm_file *file_priv, - drm_radeon_kcmd_buffer_t* cmdbuf); + drm_radeon_kcmd_buffer_t *cmdbuf); /* Flags for stats.boxes */ diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index 1ece6399..2b2407ee 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -145,8 +145,7 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr) } static int radeon_driver_vblank_do_wait(struct drm_device * dev, - unsigned int *sequence, - int crtc) + unsigned int *sequence, int crtc) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index 4c85371e..623576ad 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -3157,7 +3157,7 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil * * DRM infrastructure takes care of reclaiming dma buffers. 
*/ -void radeon_driver_preclose(struct drm_device * dev, +void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) { if (dev->dev_private) { @@ -3169,7 +3169,7 @@ void radeon_driver_preclose(struct drm_device * dev, } } -void radeon_driver_lastclose(struct drm_device * dev) +void radeon_driver_lastclose(struct drm_device *dev) { if (dev->dev_private) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -3182,7 +3182,7 @@ void radeon_driver_lastclose(struct drm_device * dev) radeon_do_release(dev); } -int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv) +int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_driver_file_fields *radeon_priv; @@ -3204,7 +3204,7 @@ int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv) return 0; } -void radeon_driver_postclose(struct drm_device * dev, struct drm_file *file_priv) +void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) { struct drm_radeon_driver_file_fields *radeon_priv = file_priv->driver_priv; diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 7710ba0d..4b8a89fe 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -364,7 +364,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n) unsigned int cur = dev_priv->current_dma_page; unsigned int rest = SAVAGE_DMA_PAGE_SIZE - dev_priv->dma_pages[cur].used; - unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) / + unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; uint32_t *dma_ptr; unsigned int i; @@ -374,7 +374,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n) if (cur + nr_pages < dev_priv->nr_dma_pages) { dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + - cur*SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; + cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; if (n < rest) rest = n; dev_priv->dma_pages[cur].used += rest; @@ -383,7 +383,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n) } else { dev_priv->dma_flush(dev_priv); nr_pages = - (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE; + (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE; for (i = cur; i < dev_priv->nr_dma_pages; ++i) { dev_priv->dma_pages[i].age = dev_priv->last_dma_age; dev_priv->dma_pages[i].used = 0; @@ -443,7 +443,7 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv) uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; dev_priv->dma_pages[cur].used += pad; - while(pad != 0) { + while (pad != 0) { *dma_ptr++ = BCI_CMD_WAIT; pad--; } @@ -587,12 +587,12 @@ int savage_driver_firstopen(struct drm_device *dev) dev_priv->mtrr[0].handle = drm_mtrr_add(dev_priv->mtrr[0].base, dev_priv->mtrr[0].size, DRM_MTRR_WC); - dev_priv->mtrr[1].base = fb_base+0x02000000; + dev_priv->mtrr[1].base = fb_base + 0x02000000; dev_priv->mtrr[1].size = 0x02000000; dev_priv->mtrr[1].handle = drm_mtrr_add(dev_priv->mtrr[1].base, dev_priv->mtrr[1].size, DRM_MTRR_WC); - dev_priv->mtrr[2].base = fb_base+0x04000000; + dev_priv->mtrr[2].base = fb_base + 0x04000000; dev_priv->mtrr[2].size = 0x04000000; dev_priv->mtrr[2].handle = drm_mtrr_add(dev_priv->mtrr[2].base, @@ -833,7 +833,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) depth_tile_format = SAVAGE_BD_TILE_DEST; } front_stride = 
dev_priv->front_pitch / (dev_priv->fb_bpp / 8); - back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); + back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp / 8); @@ -888,7 +888,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init) return -ENOMEM; } - if (savage_dma_init(dev_priv) < 0) { + if (savage_dma_init(dev_priv) < 0) { DRM_ERROR("could not initialize command DMA\n"); savage_do_cleanup_bci(dev); return -ENOMEM; @@ -983,7 +983,7 @@ static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_ * - event counter wrapped since the event was emitted or * - the hardware has advanced up to or over the event to wait for. */ - if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) ) + if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e)) return 0; else return dev_priv->wait_evnt(dev_priv, event_e); @@ -1065,8 +1065,6 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) if (!dma->buflist) return; - /*i830_flush_queue(dev);*/ - for (i = 0; i < dma->buf_count; i++) { struct drm_buf *buf = dma->buflist[i]; drm_savage_buf_priv_t *buf_priv = buf->dev_private; diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index 61ec11cc..1c5a0e2e 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -30,23 +30,23 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv, const struct drm_clip_rect *pbox) { uint32_t scstart = dev_priv->state.s3d.new_scstart; - uint32_t scend = dev_priv->state.s3d.new_scend; + uint32_t scend = dev_priv->state.s3d.new_scend; scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | ((uint32_t)pbox->x1 & 0x000007ff) | (((uint32_t)pbox->y1 << 16) & 0x07ff0000); - scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | - (((uint32_t)pbox->x2-1) & 0x000007ff) | - ((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000); + scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | + (((uint32_t)pbox->x2 - 1) & 0x000007ff) | + ((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000); if (scstart != dev_priv->state.s3d.scstart || scend != dev_priv->state.s3d.scend) { DMA_LOCALS; BEGIN_DMA(4); - DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); + DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2); DMA_WRITE(scstart); DMA_WRITE(scend); dev_priv->state.s3d.scstart = scstart; - dev_priv->state.s3d.scend = scend; + dev_priv->state.s3d.scend = scend; dev_priv->waiting = 1; DMA_COMMIT(); } @@ -61,13 +61,13 @@ void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv, ((uint32_t)pbox->x1 & 0x000007ff) | (((uint32_t)pbox->y1 << 12) & 0x00fff000); drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | - (((uint32_t)pbox->x2-1) & 0x000007ff) | - ((((uint32_t)pbox->y2-1) << 12) & 0x00fff000); + (((uint32_t)pbox->x2 - 1) & 0x000007ff) | + ((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000); if (drawctrl0 != dev_priv->state.s4.drawctrl0 || drawctrl1 != dev_priv->state.s4.drawctrl1) { DMA_LOCALS; BEGIN_DMA(4); - DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); + DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2); DMA_WRITE(drawctrl0); DMA_WRITE(drawctrl1); @@ -87,8 +87,8 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, } if (!(addr & 1)) { /* local */ addr &= ~7; - if (addr < dev_priv->texture_offset || - addr >= dev_priv->texture_offset+dev_priv->texture_size) { + if (addr < dev_priv->texture_offset || + addr >= dev_priv->texture_offset + dev_priv->texture_size) { 
DRM_ERROR ("bad texAddr%d %08x (local addr out of range)\n", unit, addr); @@ -114,10 +114,10 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit, } #define SAVE_STATE(reg,where) \ - if(start <= reg && start+count > reg) \ + if(start <= reg && start + count > reg) \ dev_priv->state.where = regs[reg - start] #define SAVE_STATE_MASK(reg,where,mask) do { \ - if(start <= reg && start+count > reg) { \ + if(start <= reg && start + count > reg) { \ uint32_t tmp; \ tmp = regs[reg - start]; \ dev_priv->state.where = (tmp & (mask)) | \ @@ -129,9 +129,9 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv, const uint32_t *regs) { if (start < SAVAGE_TEXPALADDR_S3D || - start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { + start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", - start, start+count-1); + start, start + count - 1); return -EINVAL; } @@ -142,7 +142,7 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv, /* if any texture regs were changed ... */ if (start <= SAVAGE_TEXCTRL_S3D && - start+count > SAVAGE_TEXPALADDR_S3D) { + start + count > SAVAGE_TEXPALADDR_S3D) { /* ... check texture state */ SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl); SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); @@ -161,9 +161,9 @@ static int savage_verify_state_s4(drm_savage_private_t *dev_priv, int ret = 0; if (start < SAVAGE_DRAWLOCALCTRL_S4 || - start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) { + start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) { DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", - start, start+count-1); + start, start + count - 1); return -EINVAL; } @@ -212,14 +212,14 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv, return ret; /* scissor regs are emitted in savage_dispatch_draw */ if (start < SAVAGE_SCSTART_S3D) { - if (start+count > SAVAGE_SCEND_S3D+1) - count2 = count - (SAVAGE_SCEND_S3D+1 - start); - if (start+count > SAVAGE_SCSTART_S3D) + if (start + count > SAVAGE_SCEND_S3D + 1) + count2 = count - (SAVAGE_SCEND_S3D + 1 - start); + if (start + count > SAVAGE_SCSTART_S3D) count = SAVAGE_SCSTART_S3D - start; } else if (start <= SAVAGE_SCEND_S3D) { - if (start+count > SAVAGE_SCEND_S3D+1) { - count -= SAVAGE_SCEND_S3D+1 - start; - start = SAVAGE_SCEND_S3D+1; + if (start + count > SAVAGE_SCEND_S3D + 1) { + count -= SAVAGE_SCEND_S3D + 1 - start; + start = SAVAGE_SCEND_S3D + 1; } else return 0; } @@ -229,24 +229,24 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv, return ret; /* scissor regs are emitted in savage_dispatch_draw */ if (start < SAVAGE_DRAWCTRL0_S4) { - if (start+count > SAVAGE_DRAWCTRL1_S4+1) + if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) count2 = count - (SAVAGE_DRAWCTRL1_S4 + 1 - start); - if (start+count > SAVAGE_DRAWCTRL0_S4) + if (start + count > SAVAGE_DRAWCTRL0_S4) count = SAVAGE_DRAWCTRL0_S4 - start; } else if (start <= SAVAGE_DRAWCTRL1_S4) { - if (start+count > SAVAGE_DRAWCTRL1_S4+1) { - count -= SAVAGE_DRAWCTRL1_S4+1 - start; - start = SAVAGE_DRAWCTRL1_S4+1; + if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) { + count -= SAVAGE_DRAWCTRL1_S4 + 1 - start; + start = SAVAGE_DRAWCTRL1_S4 + 1; } else return 0; } } - bci_size = count + (count+254)/255 + count2 + (count2+254)/255; + bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255; if (cmd_header->state.global) { - BEGIN_DMA(bci_size+1); + BEGIN_DMA(bci_size + 1); DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D); dev_priv->waiting = 1; } else { @@ -286,8 +286,8 @@ static int 
savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, BCI_LOCALS; if (!dmabuf) { - DRM_ERROR("called without dma buffers!\n"); - return -EINVAL; + DRM_ERROR("called without dma buffers!\n"); + return -EINVAL; } if (!n) @@ -337,9 +337,9 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, } } - if (start + n > dmabuf->total/32) { + if (start + n > dmabuf->total / 32) { DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", - start, start + n - 1, dmabuf->total/32); + start, start + n - 1, dmabuf->total / 32); return -EINVAL; } @@ -374,33 +374,33 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv, /* Need to reorder indices for correct flat * shading while preserving the clock sense * for correct culling. Only on Savage3D. */ - int reorder[3] = {-1, -1, -1}; - reorder[start%3] = 2; + int reorder[3] = { -1, -1, -1 }; + reorder[start % 3] = 2; - BEGIN_BCI((count+1+1)/2); - BCI_DRAW_INDICES_S3D(count, prim, start+2); + BEGIN_BCI((count + 1 + 1) / 2); + BCI_DRAW_INDICES_S3D(count, prim, start + 2); - for (i = start+1; i+1 < start+count; i += 2) + for (i = start + 1; i + 1 < start + count; i += 2) BCI_WRITE((i + reorder[i % 3]) | ((i + 1 + reorder[(i + 1) % 3]) << 16)); - if (i < start+count) - BCI_WRITE(i + reorder[i%3]); + if (i < start + count) + BCI_WRITE(i + reorder[i % 3]); } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { - BEGIN_BCI((count+1+1)/2); + BEGIN_BCI((count + 1 + 1) / 2); BCI_DRAW_INDICES_S3D(count, prim, start); - for (i = start+1; i+1 < start+count; i += 2) - BCI_WRITE(i | ((i+1) << 16)); - if (i < start+count) + for (i = start + 1; i + 1 < start + count; i += 2) + BCI_WRITE(i | ((i + 1) << 16)); + if (i < start + count) BCI_WRITE(i); } else { - BEGIN_BCI((count+2+1)/2); + BEGIN_BCI((count + 2 + 1) / 2); BCI_DRAW_INDICES_S4(count, prim, skip); - for (i = start; i+1 < start+count; i += 2) - BCI_WRITE(i | ((i+1) << 16)); - if (i < start+count) + for (i = start; i + 1 < start + count; i += 2) + BCI_WRITE(i | ((i + 1) << 16)); + if (i < start + count) BCI_WRITE(i); } @@ -479,9 +479,9 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, return -EINVAL; } - if (start + n > vb_size / (vb_stride*4)) { + if (start + n > vb_size / (vb_stride * 4)) { DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n", - start, start + n - 1, vb_size / (vb_stride*4)); + start, start + n - 1, vb_size / (vb_stride * 4)); return -EINVAL; } @@ -493,28 +493,28 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv, /* Need to reorder vertices for correct flat * shading while preserving the clock sense * for correct culling. Only on Savage3D. 
*/ - int reorder[3] = {-1, -1, -1}; - reorder[start%3] = 2; + int reorder[3] = { -1, -1, -1 }; + reorder[start % 3] = 2; - BEGIN_DMA(count*vtx_size+1); + BEGIN_DMA(count * vtx_size + 1); DMA_DRAW_PRIMITIVE(count, prim, skip); - for (i = start; i < start+count; ++i) { + for (i = start; i < start + count; ++i) { unsigned int j = i + reorder[i % 3]; - DMA_COPY(&vtxbuf[vb_stride*j], vtx_size); + DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); } DMA_COMMIT(); } else { - BEGIN_DMA(count*vtx_size+1); + BEGIN_DMA(count * vtx_size + 1); DMA_DRAW_PRIMITIVE(count, prim, skip); if (vb_stride == vtx_size) { - DMA_COPY(&vtxbuf[vb_stride*start], - vtx_size*count); + DMA_COPY(&vtxbuf[vb_stride * start], + vtx_size * count); } else { - for (i = start; i < start+count; ++i) { - DMA_COPY(&vtxbuf[vb_stride*i], + for (i = start; i < start + count; ++i) { + DMA_COPY(&vtxbuf[vb_stride * i], vtx_size); } } @@ -544,8 +544,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, BCI_LOCALS; if (!dmabuf) { - DRM_ERROR("called without dma buffers!\n"); - return -EINVAL; + DRM_ERROR("called without dma buffers!\n"); + return -EINVAL; } if (!n) @@ -623,9 +623,9 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, /* check indices */ for (i = 0; i < count; ++i) { - if (idx[i] > dmabuf->total/32) { + if (idx[i] > dmabuf->total / 32) { DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", - i, idx[i], dmabuf->total/32); + i, idx[i], dmabuf->total / 32); return -EINVAL; } } @@ -634,31 +634,31 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv, /* Need to reorder indices for correct flat * shading while preserving the clock sense * for correct culling. Only on Savage3D. */ - int reorder[3] = {2, -1, -1}; + int reorder[3] = { 2, -1, -1 }; - BEGIN_BCI((count+1+1)/2); + BEGIN_BCI((count + 1 + 1) / 2); BCI_DRAW_INDICES_S3D(count, prim, idx[2]); - for (i = 1; i+1 < count; i += 2) + for (i = 1; i + 1 < count; i += 2) BCI_WRITE(idx[i + reorder[i % 3]] | (idx[i + 1 + reorder[(i + 1) % 3]] << 16)); if (i < count) - BCI_WRITE(idx[i + reorder[i%3]]); + BCI_WRITE(idx[i + reorder[i % 3]]); } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { - BEGIN_BCI((count+1+1)/2); + BEGIN_BCI((count + 1 + 1) / 2); BCI_DRAW_INDICES_S3D(count, prim, idx[0]); - for (i = 1; i+1 < count; i += 2) - BCI_WRITE(idx[i] | (idx[i+1] << 16)); + for (i = 1; i + 1 < count; i += 2) + BCI_WRITE(idx[i] | (idx[i + 1] << 16)); if (i < count) BCI_WRITE(idx[i]); } else { - BEGIN_BCI((count+2+1)/2); + BEGIN_BCI((count + 2 + 1) / 2); BCI_DRAW_INDICES_S4(count, prim, skip); - for (i = 0; i+1 < count; i += 2) - BCI_WRITE(idx[i] | (idx[i+1] << 16)); + for (i = 0; i + 1 < count; i += 2) + BCI_WRITE(idx[i] | (idx[i + 1] << 16)); if (i < count) BCI_WRITE(idx[i]); } @@ -743,9 +743,9 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, /* Check indices */ for (i = 0; i < count; ++i) { - if (idx[i] > vb_size / (vb_stride*4)) { + if (idx[i] > vb_size / (vb_stride * 4)) { DRM_ERROR("idx[%u]=%u out of range (0-%u)\n", - i, idx[i], vb_size / (vb_stride*4)); + i, idx[i], vb_size / (vb_stride * 4)); return -EINVAL; } } @@ -754,24 +754,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv, /* Need to reorder vertices for correct flat * shading while preserving the clock sense * for correct culling. Only on Savage3D. 
*/ - int reorder[3] = {2, -1, -1}; + int reorder[3] = { 2, -1, -1 }; - BEGIN_DMA(count*vtx_size+1); + BEGIN_DMA(count * vtx_size + 1); DMA_DRAW_PRIMITIVE(count, prim, skip); for (i = 0; i < count; ++i) { unsigned int j = idx[i + reorder[i % 3]]; - DMA_COPY(&vtxbuf[vb_stride*j], vtx_size); + DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); } DMA_COMMIT(); } else { - BEGIN_DMA(count*vtx_size+1); + BEGIN_DMA(count * vtx_size + 1); DMA_DRAW_PRIMITIVE(count, prim, skip); for (i = 0; i < count; ++i) { unsigned int j = idx[i]; - DMA_COPY(&vtxbuf[vb_stride*j], vtx_size); + DMA_COPY(&vtxbuf[vb_stride * j], vtx_size); } DMA_COMMIT(); @@ -823,12 +823,12 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv, x = boxes[i].x1, y = boxes[i].y1; w = boxes[i].x2 - boxes[i].x1; h = boxes[i].y2 - boxes[i].y1; - BEGIN_DMA(nbufs*6); + BEGIN_DMA(nbufs * 6); for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) { if (!(flags & buf)) continue; DMA_WRITE(clear_cmd); - switch(buf) { + switch (buf) { case SAVAGE_FRONT: DMA_WRITE(dev_priv->front_offset); DMA_WRITE(dev_priv->front_bd); @@ -880,8 +880,8 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv, DMA_WRITE(dev_priv->back_bd); DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1)); - DMA_WRITE(BCI_W_H(boxes[i].x2-boxes[i].x1, - boxes[i].y2-boxes[i].y1)); + DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1, + boxes[i].y2 - boxes[i].y1)); DMA_COMMIT(); } @@ -973,7 +973,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ if (cmdbuf->dma_idx > dma->buf_count) { DRM_ERROR ("vertex buffer index %u out of range (0-%u)\n", - cmdbuf->dma_idx, dma->buf_count-1); + cmdbuf->dma_idx, dma->buf_count - 1); return -EINVAL; } dmabuf = dma->buflist[cmdbuf->dma_idx]; @@ -1064,15 +1064,15 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ case SAVAGE_CMD_DMA_PRIM: case SAVAGE_CMD_VB_PRIM: if (!first_draw_cmd) - first_draw_cmd = cmdbuf->cmd_addr-1; + first_draw_cmd = cmdbuf->cmd_addr - 1; cmdbuf->cmd_addr += j; i += j; break; default: if (first_draw_cmd) { - ret = savage_dispatch_draw ( + ret = savage_dispatch_draw( dev_priv, first_draw_cmd, - cmdbuf->cmd_addr-1, + cmdbuf->cmd_addr - 1, dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, @@ -1134,7 +1134,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_ } if (first_draw_cmd) { - ret = savage_dispatch_draw ( + ret = savage_dispatch_draw( dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride, cmdbuf->nbox, cmdbuf->box_addr); diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h index a4a88fe1..db532f3c 100644 --- a/shared-core/sis_drv.h +++ b/shared-core/sis_drv.h @@ -84,8 +84,6 @@ extern int sis_final_context(struct drm_device * dev, int context); #endif - - extern struct drm_ioctl_desc sis_ioctls[]; extern int sis_max_ioctl; diff --git a/shared-core/via_3d_reg.h b/shared-core/via_3d_reg.h index cf61bb51..462375d5 100644 --- a/shared-core/via_3d_reg.h +++ b/shared-core/via_3d_reg.h @@ -1643,7 +1643,6 @@ #define HC_HAGPBpID_STOP 0x00000002 #define HC_HAGPBpH_MASK 0x00ffffff - #define VIA_VIDEO_HEADER5 0xFE040000 #define VIA_VIDEO_HEADER6 0xFE050000 #define VIA_VIDEO_HEADER7 0xFE060000 diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c index 89753aa6..ab39f53c 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -54,11 +54,11 @@ *vb++ = (w2); \ dev_priv->dma_low += 8; -static void 
via_cmdbuf_start(drm_via_private_t * dev_priv); -static void via_cmdbuf_pause(drm_via_private_t * dev_priv); -static void via_cmdbuf_reset(drm_via_private_t * dev_priv); -static void via_cmdbuf_rewind(drm_via_private_t * dev_priv); -static int via_wait_idle(drm_via_private_t * dev_priv); +static void via_cmdbuf_start(drm_via_private_t *dev_priv); +static void via_cmdbuf_pause(drm_via_private_t *dev_priv); +static void via_cmdbuf_reset(drm_via_private_t *dev_priv); +static void via_cmdbuf_rewind(drm_via_private_t *dev_priv); +static int via_wait_idle(drm_via_private_t *dev_priv); static void via_pad_cache(drm_via_private_t *dev_priv, int qwords); @@ -110,7 +110,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size) if (count-- == 0) { DRM_ERROR ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n", - hw_addr, cur_addr, next_addr); + hw_addr, cur_addr, next_addr); return -1; } } while ((cur_addr < hw_addr) && (next_addr >= hw_addr)); @@ -450,7 +450,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv, -static int via_wait_idle(drm_via_private_t * dev_priv) +static int via_wait_idle(drm_via_private_t *dev_priv) { int count = 10000000; @@ -462,7 +462,7 @@ static int via_wait_idle(drm_via_private_t * dev_priv) return count; } -static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type, +static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type, uint32_t addr, uint32_t *cmd_addr_hi, uint32_t *cmd_addr_lo, int skip_wait) { @@ -472,11 +472,12 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type, uint32_t qw_pad_count; if (!skip_wait) - via_cmdbuf_wait(dev_priv, 2*CMDBUF_ALIGNMENT_SIZE); + via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE); vb = via_get_dma(dev_priv); - VIA_OUT_RING_QW( HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) | - (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16); + VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) | + (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16); + agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) - ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3); @@ -557,8 +558,8 @@ static void via_pad_cache(drm_via_private_t *dev_priv, int qwords) via_cmdbuf_wait(dev_priv, qwords + 2); vb = via_get_dma(dev_priv); - VIA_OUT_RING_QW( HC_HEADER2, HC_ParaType_NotTex << 16); - via_align_buffer(dev_priv,vb,qwords); + VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16); + via_align_buffer(dev_priv, vb, qwords); } static inline void via_dummy_bitblt(drm_via_private_t * dev_priv) @@ -577,7 +578,7 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv) volatile uint32_t *last_pause_ptr; agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; - via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi, + via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi, &jump_addr_lo, 0); dev_priv->dma_wrap = dev_priv->dma_low; @@ -594,16 +595,15 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv) via_dummy_bitblt(dev_priv); via_dummy_bitblt(dev_priv); - last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, + last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, &pause_addr_lo, 0) -1; - via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, + via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi, &pause_addr_lo, 0); *last_pause_ptr = pause_addr_lo; - via_hook_segment( dev_priv, jump_addr_hi, jump_addr_lo, 0); + 
via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0); } - static void via_cmdbuf_rewind(drm_via_private_t * dev_priv) { via_cmdbuf_jump(dev_priv); @@ -614,7 +614,7 @@ static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type) uint32_t pause_addr_lo, pause_addr_hi; via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0); - via_hook_segment( dev_priv, pause_addr_hi, pause_addr_lo, 0); + via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0); } @@ -653,7 +653,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file * count = 1000000; tmp_size = d_siz->size; - switch(d_siz->func) { + switch (d_siz->func) { case VIA_CMDBUF_SPACE: while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size) && count--) { diff --git a/shared-core/via_drm.h b/shared-core/via_drm.h index 7ee69d28..e6a8ec64 100644 --- a/shared-core/via_drm.h +++ b/shared-core/via_drm.h @@ -228,7 +228,7 @@ typedef enum { #define VIA_IRQ_FLAGS_MASK 0xF0000000 -enum drm_via_irqs{ +enum drm_via_irqs { drm_via_irq_hqv0 = 0, drm_via_irq_hqv1, drm_via_irq_dma0_dd, @@ -238,7 +238,7 @@ enum drm_via_irqs{ drm_via_irq_num }; -struct drm_via_wait_irq_request{ +struct drm_via_wait_irq_request { unsigned irq; via_irq_seq_type_t type; uint32_t sequence; @@ -270,9 +270,9 @@ typedef struct drm_via_dmablit { uint32_t fb_stride; unsigned char *mem_addr; - uint32_t mem_stride; + uint32_t mem_stride; - uint32_t flags; + uint32_t flags; int to_fb; drm_via_blitsync_t sync; diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c index df91ab00..ec3f50bd 100644 --- a/shared-core/via_irq.c +++ b/shared-core/via_irq.c @@ -76,8 +76,7 @@ static maskarray_t via_pro_group_a_irqs[] = { {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, }; -static int via_num_pro_group_a = - sizeof(via_pro_group_a_irqs)/sizeof(maskarray_t); +static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs); static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; static maskarray_t via_unichrome_irqs[] = { @@ -86,15 +85,15 @@ static maskarray_t via_unichrome_irqs[] = { {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} }; -static int via_num_unichrome = sizeof(via_unichrome_irqs)/sizeof(maskarray_t); +static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs); static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; static unsigned time_diff(struct timeval *now,struct timeval *then) { - return (now->tv_usec >= then->tv_usec) ? - now->tv_usec - then->tv_usec : - 1000000 - (then->tv_usec - now->tv_usec); + return (now->tv_usec >= then->tv_usec) ? 
+ now->tv_usec - then->tv_usec : + 1000000 - (then->tv_usec - now->tv_usec); } irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) @@ -126,17 +125,17 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) } if (!(atomic_read(&dev->vbl_received) & 0xFF)) { DRM_DEBUG("US per vblank is: %u\n", - dev_priv->usec_per_vblank); + dev_priv->usec_per_vblank); } DRM_WAKEUP(&dev->vbl_queue); drm_vbl_send_signals(dev); handled = 1; } - for (i=0; i<dev_priv->num_irqs; ++i) { + for (i = 0; i < dev_priv->num_irqs; ++i) { if (status & cur_irq->pending_mask) { - atomic_inc( &cur_irq->irq_received ); - DRM_WAKEUP( &cur_irq->irq_queue ); + atomic_inc(&cur_irq->irq_received); + DRM_WAKEUP(&cur_irq->irq_queue); handled = 1; #ifdef VIA_HAVE_DMABLIT if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) { @@ -216,7 +215,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc return -EINVAL; } - if (irq >= drm_via_irq_num ) { + if (irq >= drm_via_irq_num) { DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, irq); return -EINVAL; @@ -278,11 +277,11 @@ void via_driver_irq_preinstall(struct drm_device * dev) dev_priv->irq_map = via_irqmap_unichrome; } - for(i=0; i < dev_priv->num_irqs; ++i) { + for (i = 0; i < dev_priv->num_irqs; ++i) { atomic_set(&cur_irq->irq_received, 0); cur_irq->enable_mask = dev_priv->irq_masks[i][0]; cur_irq->pending_mask = dev_priv->irq_masks[i][1]; - DRM_INIT_WAITQUEUE( &cur_irq->irq_queue ); + DRM_INIT_WAITQUEUE(&cur_irq->irq_queue); dev_priv->irq_enable_mask |= cur_irq->enable_mask; dev_priv->irq_pending_mask |= cur_irq->pending_mask; cur_irq++; diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c index cfacd0ca..d2b69f74 100644 --- a/shared-core/via_verifier.c +++ b/shared-core/via_verifier.c @@ -249,10 +249,10 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) * Partially stolen from drm_memory.h */ -static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq, +static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq, unsigned long offset, unsigned long size, - struct drm_device * dev) + struct drm_device *dev) { #ifdef __linux__ struct drm_map_list *r_list; diff --git a/shared-core/via_verifier.h b/shared-core/via_verifier.h index c50a8130..abdaa653 100644 --- a/shared-core/via_verifier.h +++ b/shared-core/via_verifier.h @@ -33,8 +33,6 @@ typedef enum { tex_address } drm_via_sequence_t; - - typedef struct { unsigned texture; uint32_t z_addr; @@ -45,7 +43,7 @@ typedef struct { uint32_t tex_level_lo[2]; uint32_t tex_level_hi[2]; uint32_t tex_palette_size[2]; - uint32_t tex_npot[2]; + uint32_t tex_npot[2]; drm_via_sequence_t unfinished; int agp_texture; int multitex; @@ -56,9 +54,9 @@ typedef struct { const uint32_t *buf_start; } drm_via_state_t; -extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, +extern int via_verify_command_stream(const uint32_t *buf, unsigned int size, struct drm_device *dev, int agp); -extern int via_parse_command_stream(struct drm_device *dev, const uint32_t * buf, +extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf, unsigned int size); #endif |