Diffstat (limited to 'shared')
-rw-r--r--  shared/radeon.h         8
-rw-r--r--  shared/radeon_cp.c     10
-rw-r--r--  shared/radeon_drm.h    20
-rw-r--r--  shared/radeon_drv.h    26
-rw-r--r--  shared/radeon_state.c 210
5 files changed, 269 insertions(+), 5 deletions(-)
diff --git a/shared/radeon.h b/shared/radeon.h
index 80bfa0c1..28d12a3b 100644
--- a/shared/radeon.h
+++ b/shared/radeon.h
@@ -42,10 +42,10 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20041207"
+#define DRIVER_DATE "20050125"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 13
+#define DRIVER_MINOR 14
#define DRIVER_PATCHLEVEL 0
/* Interface history:
@@ -84,6 +84,8 @@
* (No 3D support yet - just microcode loading).
* 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
* - Add hyperz support, add hyperz flags to clear ioctl.
+ * 1.14- Add support for color tiling
+ * - Add R100/R200 surface allocation/free support
*/
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
@@ -112,5 +114,7 @@
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_EMIT)] = { radeon_irq_emit, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_SETPARAM)] = { radeon_cp_setparam, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SURF_ALLOC)] = { radeon_surface_alloc, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_SURF_FREE)] = { radeon_surface_free, 1, 0 }, \
#endif
diff --git a/shared/radeon_cp.c b/shared/radeon_cp.c
index 5e23452b..dfb8b734 100644
--- a/shared/radeon_cp.c
+++ b/shared/radeon_cp.c
@@ -1698,7 +1698,7 @@ int radeon_cp_stop( DRM_IOCTL_ARGS )
void radeon_do_release( drm_device_t *dev )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- int ret;
+ int i, ret;
if (dev_priv) {
@@ -1720,6 +1720,14 @@ void radeon_do_release( drm_device_t *dev )
if (dev_priv->mmio) /* remove this after permanent addmaps */
RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
+ if (dev_priv->mmio) { /* remove all surfaces */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ RADEON_WRITE(RADEON_SURFACE0_INFO + 16*i, 0);
+ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16*i, 0);
+ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16*i, 0);
+ }
+ }
+
/* Free memory heap structures */
radeon_mem_takedown( &(dev_priv->gart_heap) );
radeon_mem_takedown( &(dev_priv->fb_heap) );
diff --git a/shared/radeon_drm.h b/shared/radeon_drm.h
index e086938f..1a9ffc97 100644
--- a/shared/radeon_drm.h
+++ b/shared/radeon_drm.h
@@ -231,6 +231,8 @@ typedef union {
#define RADEON_MAX_TEXTURE_LEVELS 12
#define RADEON_MAX_TEXTURE_UNITS 3
+#define RADEON_MAX_SURFACES 8
+
/* Blits have strict offset rules. All blit offset must be aligned on
* a 1K-byte boundary.
*/
@@ -365,6 +367,7 @@ typedef struct {
int pfState; /* number of 3d windows (0,1,2ormore) */
int pfCurrentPage; /* which buffer is being displayed? */
int crtc2_base; /* CRTC2 frame offset */
+ int tiling_enabled; /* set by drm, read by 2d + 3d clients */
} drm_radeon_sarea_t;
@@ -403,6 +406,8 @@ typedef struct {
#define DRM_RADEON_IRQ_WAIT 0x17
#define DRM_RADEON_CP_RESUME 0x18
#define DRM_RADEON_SETPARAM 0x19
+#define DRM_RADEON_SURF_ALLOC 0x1a
+#define DRM_RADEON_SURF_FREE 0x1b
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -429,6 +434,8 @@ typedef struct {
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
+#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
+#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
typedef struct drm_radeon_init {
enum {
@@ -629,6 +636,19 @@ typedef struct drm_radeon_setparam {
} drm_radeon_setparam_t;
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
+#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
+
+/* 1.14: Clients can allocate/free a surface
+ */
+typedef struct drm_radeon_surface_alloc {
+ unsigned int address;
+ unsigned int size;
+ unsigned int flags;
+} drm_radeon_surface_alloc_t;
+
+typedef struct drm_radeon_surface_free {
+ unsigned int address;
+} drm_radeon_surface_free_t;
#endif
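
The drm_radeon_surface_alloc_t / drm_radeon_surface_free_t structures above are the
whole userspace-visible interface for the new 1.14 ioctls. As a rough illustration
(not part of this patch), here is how a DRI client might drive them through a
libdrm-style drmCommandWrite() wrapper; the helper names, the flags value and the
error handling are hypothetical, only the command numbers and structures come from
this header:

/* Hypothetical client-side sketch: allocate a surface over a framebuffer
 * range and release it again.  Assumes an already-open DRM fd and the
 * definitions from radeon_drm.h. */
#include <xf86drm.h>
#include "radeon_drm.h"

static int radeon_surface_alloc_range(int fd, unsigned int offset,
                                      unsigned int size, unsigned int flags)
{
	drm_radeon_surface_alloc_t alloc;

	alloc.address = offset;	/* low alignment bits must be clear */
	alloc.size    = size;	/* offset + size - 1 must end on an alignment boundary */
	alloc.flags   = flags;	/* tiling flags, written verbatim into SURFACE*_INFO */

	/* returns 0 on success, an error if the kernel rejects the range */
	return drmCommandWrite(fd, DRM_RADEON_SURF_ALLOC, &alloc, sizeof(alloc));
}

static int radeon_surface_free_range(int fd, unsigned int offset)
{
	drm_radeon_surface_free_t memfree;

	memfree.address = offset;	/* same address that was allocated */

	return drmCommandWrite(fd, DRM_RADEON_SURF_FREE, &memfree, sizeof(memfree));
}
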
diff --git a/shared/radeon_drv.h b/shared/radeon_drv.h
index 60252db1..f48a95c1 100644
--- a/shared/radeon_drv.h
+++ b/shared/radeon_drv.h
@@ -112,6 +112,21 @@ struct mem_block {
DRMFILE filp; /* 0: free, -1: heap, other: real files */
};
+struct radeon_surface {
+ int refcount;
+ u32 lower;
+ u32 upper;
+ u32 flags;
+};
+
+struct radeon_virt_surface {
+ int surface_index;
+ u32 lower;
+ u32 upper;
+ u32 flags;
+ DRMFILE filp;
+};
+
typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring;
@@ -187,11 +202,15 @@ typedef struct drm_radeon_private {
struct mem_block *fb_heap;
/* SW interrupt */
- wait_queue_head_t swi_queue;
- atomic_t swi_emitted;
+ wait_queue_head_t swi_queue;
+ atomic_t swi_emitted;
+
+ struct radeon_surface surfaces[RADEON_MAX_SURFACES];
+ struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES];
/* starting from here on, data is preserved across an open */
uint32_t flags; /* see radeon_chip_flags */
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
struct radeon_i2c_chan i2c[4];
#endif
@@ -240,6 +259,8 @@ extern int radeon_mem_free( DRM_IOCTL_ARGS );
extern int radeon_mem_init_heap( DRM_IOCTL_ARGS );
extern void radeon_mem_takedown( struct mem_block **heap );
extern void radeon_mem_release( DRMFILE filp, struct mem_block *heap );
+extern int radeon_surface_alloc( DRM_IOCTL_ARGS );
+extern int radeon_surface_free( DRM_IOCTL_ARGS );
/* radeon_irq.c */
extern int radeon_irq_emit( DRM_IOCTL_ARGS );
@@ -513,6 +534,7 @@ extern void radeon_driver_irq_uninstall( drm_device_t *dev );
# define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16)
#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
+# define RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff << 0)
#define RADEON_SURFACE1_INFO 0x0b1c
#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
diff --git a/shared/radeon_state.c b/shared/radeon_state.c
index 66efb5c8..15c9c6f8 100644
--- a/shared/radeon_state.c
+++ b/shared/radeon_state.c
@@ -1705,11 +1705,205 @@ static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
ADVANCE_RING();
}
+static void radeon_apply_surface_regs( int surf_index, drm_radeon_private_t *dev_priv )
+{
+ if (!dev_priv->mmio)
+ return;
+
+ radeon_do_cp_idle(dev_priv);
+
+ RADEON_WRITE( RADEON_SURFACE0_INFO + 16*surf_index,
+ dev_priv->surfaces[surf_index].flags );
+ RADEON_WRITE( RADEON_SURFACE0_LOWER_BOUND + 16*surf_index,
+ dev_priv->surfaces[surf_index].lower );
+ RADEON_WRITE( RADEON_SURFACE0_UPPER_BOUND + 16*surf_index,
+ dev_priv->surfaces[surf_index].upper );
+}
+
+/* Allocates a virtual surface.
+ * Doesn't always allocate a real surface; when possible it stretches
+ * an existing surface instead.
+ *
+ * Note that refcount is capped at 2: with a refcount of 3, freeing the
+ * middle allocation could force a split into two real surfaces, and a
+ * free surface slot might not be available.
+ * For example: we allocate three contiguous surfaces ABC. If B is
+ * freed, we suddenly need two surfaces to store A and C, which might
+ * not always be available.
+ */
+static int alloc_surface( drm_radeon_surface_alloc_t* new,
+ drm_radeon_private_t *dev_priv, DRMFILE filp )
+{
+ struct radeon_virt_surface *s;
+ int i;
+ int virt_surface_index;
+ uint32_t new_upper, new_lower;
+
+ new_lower = new->address;
+ new_upper = new_lower + new->size - 1;
+
+ /* sanity check */
+ if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
+ ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) != RADEON_SURF_ADDRESS_FIXED_MASK) ||
+ ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
+ return -1;
+
+ /* make sure there is no overlap with existing surfaces */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ if ((dev_priv->surfaces[i].refcount != 0) &&
+ (( (new_lower >= dev_priv->surfaces[i].lower) &&
+ (new_lower < dev_priv->surfaces[i].upper) ) ||
+ ( (new_lower < dev_priv->surfaces[i].lower) &&
+ (new_upper > dev_priv->surfaces[i].lower) )) )
+ return -1;
+ }
+
+ /* find a virtual surface */
+ for (i = 0; i < 2*RADEON_MAX_SURFACES; i++)
+ if (dev_priv->virt_surfaces[i].filp == 0)
+ break;
+ if (i == 2*RADEON_MAX_SURFACES)
+ return -1;
+ virt_surface_index = i;
+
+ /* try to reuse an existing surface */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ /* extend before */
+ if ((dev_priv->surfaces[i].refcount == 1) &&
+ (new->flags == dev_priv->surfaces[i].flags) &&
+ (new_upper + 1 == dev_priv->surfaces[i].lower)) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->filp = filp;
+ dev_priv->surfaces[i].refcount++;
+ dev_priv->surfaces[i].lower = s->lower;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return virt_surface_index;
+ }
+
+ /* extend after */
+ if ((dev_priv->surfaces[i].refcount == 1) &&
+ (new->flags == dev_priv->surfaces[i].flags) &&
+ (new_lower == dev_priv->surfaces[i].upper + 1)) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->filp = filp;
+ dev_priv->surfaces[i].refcount++;
+ dev_priv->surfaces[i].upper = s->upper;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return virt_surface_index;
+ }
+ }
+
+ /* okay, we need a new one */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ if (dev_priv->surfaces[i].refcount == 0) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->filp = filp;
+ dev_priv->surfaces[i].refcount = 1;
+ dev_priv->surfaces[i].lower = s->lower;
+ dev_priv->surfaces[i].upper = s->upper;
+ dev_priv->surfaces[i].flags = s->flags;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return virt_surface_index;
+ }
+ }
+
+ /* we didn't find anything */
+ return -1;
+}
+
+static int free_surface( DRMFILE filp, drm_radeon_private_t *dev_priv, int lower )
+{
+ struct radeon_virt_surface *s;
+ int i;
+ /* find the virtual surface */
+ for(i = 0; i < 2*RADEON_MAX_SURFACES; i++) {
+ s = &(dev_priv->virt_surfaces[i]);
+ if (s->filp) {
+ if ((lower == s->lower) && (filp == s->filp)) {
+ if (dev_priv->surfaces[s->surface_index].lower == s->lower)
+ dev_priv->surfaces[s->surface_index].lower = s->upper;
+
+ if (dev_priv->surfaces[s->surface_index].upper == s->upper)
+ dev_priv->surfaces[s->surface_index].upper = s->lower;
+
+ dev_priv->surfaces[s->surface_index].refcount--;
+ if (dev_priv->surfaces[s->surface_index].refcount == 0)
+ dev_priv->surfaces[s->surface_index].flags = 0;
+ s->filp = 0;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+static void radeon_surfaces_release( DRMFILE filp, drm_radeon_private_t *dev_priv )
+{
+ int i;
+ for( i = 0; i < 2*RADEON_MAX_SURFACES; i++)
+ {
+ if (dev_priv->virt_surfaces[i].filp == filp)
+ free_surface(filp, dev_priv, dev_priv->virt_surfaces[i].lower);
+ }
+}
/* ================================================================
* IOCTL functions
*/
+int radeon_surface_alloc( DRM_IOCTL_ARGS )
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_surface_alloc_t alloc;
+
+ if ( !dev_priv ) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_surface_alloc_t __user *)data,
+ sizeof(alloc) );
+
+ if ( alloc_surface( &alloc, dev_priv, filp) == -1 )
+ return DRM_ERR(EINVAL);
+ else
+ return 0;
+}
+
+int radeon_surface_free( DRM_IOCTL_ARGS )
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_surface_free_t memfree;
+
+ if ( !dev_priv ) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_surface_free_t __user *)data,
+ sizeof(memfree) );
+
+ if ( free_surface( filp, dev_priv, memfree.address ) )
+ return DRM_ERR(EINVAL);
+ else
+ return 0;
+}
+
int radeon_cp_clear( DRM_IOCTL_ARGS )
{
DRM_DEVICE;
@@ -2712,6 +2906,20 @@ int radeon_cp_setparam( DRM_IOCTL_ARGS ) {
radeon_priv = filp_priv->driver_priv;
radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value;
break;
+ case RADEON_SETPARAM_SWITCH_TILING:
+ if (sp.value == 0) {
+ DRM_DEBUG( "color tiling disabled\n" );
+ dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+ dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+ dev_priv->sarea_priv->tiling_enabled = 0;
+ }
+ else if (sp.value == 1) {
+ DRM_DEBUG( "color tiling enabled\n" );
+ dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
+ dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
+ dev_priv->sarea_priv->tiling_enabled = 1;
+ }
+ break;
default:
DRM_DEBUG( "Invalid parameter %d\n", sp.param );
return DRM_ERR( EINVAL );
@@ -2723,6 +2931,7 @@ int radeon_cp_setparam( DRM_IOCTL_ARGS ) {
/* When a client dies:
* - Check for and clean up flipped page state
* - Free any alloced GART memory.
+ * - Free any alloced radeon surfaces.
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
@@ -2735,6 +2944,7 @@ static void radeon_driver_prerelease(drm_device_t *dev, DRMFILE filp)
}
radeon_mem_release( filp, dev_priv->gart_heap );
radeon_mem_release( filp, dev_priv->fb_heap );
+ radeon_surfaces_release( filp, dev_priv );
}
}
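
Finally, a similarly hypothetical sketch of the client side of the
RADEON_SETPARAM_SWITCH_TILING parameter handled in the radeon_state.c hunk above,
again assuming a libdrm-style drmCommandWrite() and an open DRM fd; the helper
name is illustrative, only the structure, field names and parameter value come
from this patch:

/* Hypothetical 2D-driver-side sketch: toggle color tiling via the new
 * RADEON_SETPARAM_SWITCH_TILING setparam.  The kernel updates the
 * front/back pitch_offset tiling bits and sets tiling_enabled in the
 * SAREA for 2D/3D clients to read. */
#include <string.h>
#include <xf86drm.h>
#include "radeon_drm.h"

static int radeon_switch_color_tiling(int fd, int enable)
{
	drm_radeon_setparam_t sp;

	memset(&sp, 0, sizeof(sp));
	sp.param = RADEON_SETPARAM_SWITCH_TILING;
	sp.value = enable ? 1 : 0;	/* the kernel only acts on 0 and 1 */

	return drmCommandWrite(fd, DRM_RADEON_SETPARAM, &sp, sizeof(sp));
}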