author    Roland Scheidegger <rscheidegger_lists@hispeed.ch>  2005-01-26 17:48:59 +0000
committer Roland Scheidegger <rscheidegger_lists@hispeed.ch>  2005-01-26 17:48:59 +0000
commit    43c3223de690b892759901386d8dc936b0dfbad1 (patch)
tree      cbeed9ac6c6cc5777031d72b81b155b6f4c0c580 /shared-core
parent    408376b2031cf301f1a8e35e89ceefc72f2fdc94 (diff)
(Stephane Marchesin, me) Add radeon framebuffer tiling support to the radeon
drm. Add new ioctls to manage surfaces which cover the tiled areas.
Diffstat (limited to 'shared-core')
-rw-r--r--  shared-core/radeon_cp.c    |  10
-rw-r--r--  shared-core/radeon_drm.h   |  21
-rw-r--r--  shared-core/radeon_drv.h   |  28
-rw-r--r--  shared-core/radeon_state.c | 214
4 files changed, 269 insertions, 4 deletions
diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c
index fdba7c70..6121d283 100644
--- a/shared-core/radeon_cp.c
+++ b/shared-core/radeon_cp.c
@@ -1689,7 +1689,7 @@ int radeon_cp_stop(DRM_IOCTL_ARGS)
void radeon_do_release(drm_device_t * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- int ret;
+ int i, ret;
if (dev_priv) {
@@ -1711,6 +1711,14 @@ void radeon_do_release(drm_device_t * dev)
if (dev_priv->mmio) /* remove this after permanent addmaps */
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+ if (dev_priv->mmio) {/* remove all surfaces */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ RADEON_WRITE(RADEON_SURFACE0_INFO + 16*i, 0);
+ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16*i, 0);
+ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16*i, 0);
+ }
+ }
+
/* Free memory heap structures */
radeon_mem_takedown(&(dev_priv->gart_heap));
radeon_mem_takedown(&(dev_priv->fb_heap));
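Each surface n owns an INFO/LOWER_BOUND/UPPER_BOUND register triple at a fixed 16-byte stride above the SURFACE0 bank; that stride is what the cleanup loop above (and radeon_apply_surface_regs() further down) relies on. A minimal sketch of the addressing, using a hypothetical helper name; RADEON_WRITE, the register defines and drm_radeon_private_t are the ones from radeon_drv.h:

/* Hypothetical helper, not part of the patch: clear the register bank
 * of surface n. Surface n's registers sit 16*n bytes above surface 0's. */
static void radeon_clear_surface_regs(drm_radeon_private_t *dev_priv, int n)
{
	RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * n, 0);
	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * n, 0);
	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * n, 0);
}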
diff --git a/shared-core/radeon_drm.h b/shared-core/radeon_drm.h
index 78c3e611..5b9d35b2 100644
--- a/shared-core/radeon_drm.h
+++ b/shared-core/radeon_drm.h
@@ -227,6 +227,8 @@ typedef union {
#define RADEON_MAX_TEXTURE_LEVELS 12
#define RADEON_MAX_TEXTURE_UNITS 3
+#define RADEON_MAX_SURFACES 8
+
/* Blits have strict offset rules. All blit offset must be aligned on
* a 1K-byte boundary.
*/
@@ -359,6 +361,7 @@ typedef struct {
int pfState; /* number of 3d windows (0,1,2ormore) */
int pfCurrentPage; /* which buffer is being displayed? */
int crtc2_base; /* CRTC2 frame offset */
+ int tiling_enabled; /* set by drm, read by 2d + 3d clients */
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
@@ -396,6 +399,8 @@ typedef struct {
#define DRM_RADEON_IRQ_WAIT 0x17
#define DRM_RADEON_CP_RESUME 0x18
#define DRM_RADEON_SETPARAM 0x19
+#define DRM_RADEON_SURF_ALLOC 0x1a
+#define DRM_RADEON_SURF_FREE 0x1b
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -422,6 +427,8 @@ typedef struct {
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
+#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
+#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
typedef struct drm_radeon_init {
enum {
@@ -619,5 +626,19 @@ typedef struct drm_radeon_setparam {
} drm_radeon_setparam_t;
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
+#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
+
+/* 1.14: Clients can allocate/free a surface
+ */
+typedef struct drm_radeon_surface_alloc {
+ unsigned int address;
+ unsigned int size;
+ unsigned int flags;
+} drm_radeon_surface_alloc_t;
+
+typedef struct drm_radeon_surface_free {
+ unsigned int address;
+} drm_radeon_surface_free_t;
+
#endif
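A rough sketch of how a client might drive the two new ioctls through libdrm's drmCommandWrite(), which adds DRM_COMMAND_BASE to the DRM_RADEON_SURF_* indices defined above. The offset, size and flags values are made-up examples; RADEON_SURF_TILE_MODE_16BIT_Z (from radeon_drv.h) stands in for whatever tile-mode bits a real client would put into the SURFACE*_INFO register.

#include <stdint.h>
#include "xf86drm.h"
#include "radeon_drm.h"

/* Illustrative client-side use of the new surface ioctls (not part of
 * the patch). Pins a 1 MiB framebuffer region as a tiled surface and
 * releases it again; surfaces are keyed by their start address. */
static int pin_tiled_surface(int fd, unsigned int fb_offset)
{
	drm_radeon_surface_alloc_t alloc;
	drm_radeon_surface_free_t rel;
	int ret;

	alloc.address = fb_offset;      /* must be 1 KiB aligned */
	alloc.size = 1024 * 1024;       /* whole number of 1 KiB blocks */
	alloc.flags = RADEON_SURF_TILE_MODE_16BIT_Z;  /* placeholder flags */

	ret = drmCommandWrite(fd, DRM_RADEON_SURF_ALLOC, &alloc, sizeof(alloc));
	if (ret)
		return ret;

	/* ... render while the surface covers the tiled region ... */

	rel.address = fb_offset;
	return drmCommandWrite(fd, DRM_RADEON_SURF_FREE, &rel, sizeof(rel));
}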
diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h
index 30c3bcb6..bb1d30b2 100644
--- a/shared-core/radeon_drv.h
+++ b/shared-core/radeon_drv.h
@@ -42,7 +42,7 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20041207"
+#define DRIVER_DATE "20050125"
/* Interface history:
*
@@ -80,10 +80,12 @@
* (No 3D support yet - just microcode loading).
* 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
* - Add hyperz support, add hyperz flags to clear ioctl.
+ * 1.14- Add support for color tiling
+ * - Add R100/R200 surface allocation/free support
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 13
+#define DRIVER_MINOR 14
#define DRIVER_PATCHLEVEL 0
enum radeon_family {
@@ -163,6 +165,21 @@ struct mem_block {
DRMFILE filp; /* 0: free, -1: heap, other: real files */
};
+struct radeon_surface {
+ int refcount;
+ u32 lower;
+ u32 upper;
+ u32 flags;
+};
+
+struct radeon_virt_surface {
+ int surface_index;
+ u32 lower;
+ u32 upper;
+ u32 flags;
+ DRMFILE filp;
+};
+
typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring;
@@ -241,8 +258,12 @@ typedef struct drm_radeon_private {
wait_queue_head_t swi_queue;
atomic_t swi_emitted;
+ struct radeon_surface surfaces[RADEON_MAX_SURFACES];
+ struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES];
+
/* starting from here on, data is preserved accross an open */
uint32_t flags; /* see radeon_chip_flags */
+
#ifdef __linux__
struct radeon_i2c_chan i2c[4];
#endif /* __linux__ */
@@ -291,6 +312,8 @@ extern int radeon_mem_free(DRM_IOCTL_ARGS);
extern int radeon_mem_init_heap(DRM_IOCTL_ARGS);
extern void radeon_mem_takedown(struct mem_block **heap);
extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap);
+extern int radeon_surface_alloc(DRM_IOCTL_ARGS);
+extern int radeon_surface_free(DRM_IOCTL_ARGS);
/* radeon_irq.c */
extern int radeon_irq_emit(DRM_IOCTL_ARGS);
@@ -570,6 +593,7 @@ extern void radeon_driver_free_filp_priv(drm_device_t * dev,
# define RADEON_SURF_TILE_MODE_16BIT_Z (3 << 16)
#define RADEON_SURFACE0_LOWER_BOUND 0x0b04
#define RADEON_SURFACE0_UPPER_BOUND 0x0b08
+# define RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff << 0)
#define RADEON_SURFACE1_INFO 0x0b1c
#define RADEON_SURFACE1_LOWER_BOUND 0x0b14
#define RADEON_SURFACE1_UPPER_BOUND 0x0b18
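RADEON_SURF_ADDRESS_FIXED_MASK (0x3ff) encodes the 1 KiB granularity of the surface bound registers: alloc_surface() in radeon_state.c rejects requests whose lower bound is not 1 KiB aligned or whose upper bound does not land on the last byte of a 1 KiB block. The same check, restated as a standalone sketch:

#include <stdint.h>

/* Illustrative only: mirrors the sanity check alloc_surface() performs
 * with RADEON_SURF_ADDRESS_FIXED_MASK. Returns 1 if the range is usable. */
static int surface_range_ok(uint32_t lower, uint32_t size)
{
	uint32_t upper = lower + size - 1;

	if (size == 0 || lower >= upper)
		return 0;
	if (lower & 0x3ff)            /* lower bound not 1 KiB aligned */
		return 0;
	if ((upper & 0x3ff) != 0x3ff) /* size not a multiple of 1 KiB */
		return 0;
	return 1;
}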
diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c
index 86ed132c..c9eaf06e 100644
--- a/shared-core/radeon_state.c
+++ b/shared-core/radeon_state.c
@@ -58,7 +58,9 @@ drm_ioctl_desc_t radeon_ioctls[] = {
[DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, 1, 1},
[DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, 1, 0},
[DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, 1, 0},
- [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, 1, 0}
+ [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, 1, 0},
+ [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, 1, 0},
+ [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, 1, 0}
};
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
@@ -1710,10 +1712,204 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
ADVANCE_RING();
}
+static void radeon_apply_surface_regs(int surf_index, drm_radeon_private_t *dev_priv)
+{
+ if (!dev_priv->mmio)
+ return;
+
+ radeon_do_cp_idle(dev_priv);
+
+ RADEON_WRITE(RADEON_SURFACE0_INFO + 16*surf_index,
+ dev_priv->surfaces[surf_index].flags);
+ RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16*surf_index,
+ dev_priv->surfaces[surf_index].lower);
+ RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16*surf_index,
+ dev_priv->surfaces[surf_index].upper);
+}
+
+/* Allocates a virtual surface
+ * doesn't always allocate a real surface, will stretch an existing
+ * surface when possible.
+ *
+ * Note that refcount can be at most 2, since during a free refcount=3
+ * might mean we have to allocate a new surface which might not always
+ * be available.
+ * For example: we allocate three contiguous surfaces ABC. If B is
+ * freed, we suddenly need two surfaces to store A and C, which might
+ * not always be available.
+ */
+static int alloc_surface(drm_radeon_surface_alloc_t* new, drm_radeon_private_t *dev_priv, DRMFILE filp)
+{
+ struct radeon_virt_surface *s;
+ int i;
+ int virt_surface_index;
+ uint32_t new_upper, new_lower;
+
+ new_lower = new->address;
+ new_upper = new_lower + new->size - 1;
+
+ /* sanity check */
+ if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
+ ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) != RADEON_SURF_ADDRESS_FIXED_MASK) ||
+ ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
+ return -1;
+
+ /* make sure there is no overlap with existing surfaces */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ if ((dev_priv->surfaces[i].refcount != 0) &&
+ (( (new_lower >= dev_priv->surfaces[i].lower) &&
+ (new_lower < dev_priv->surfaces[i].upper) ) ||
+ ( (new_lower < dev_priv->surfaces[i].lower) &&
+ (new_upper > dev_priv->surfaces[i].lower) )) ){
+ return -1;}
+ }
+
+ /* find a virtual surface */
+ for (i = 0; i < 2*RADEON_MAX_SURFACES; i++)
+ if (dev_priv->virt_surfaces[i].filp == 0)
+ break;
+ if (i == 2*RADEON_MAX_SURFACES) {
+ return -1;}
+ virt_surface_index = i;
+
+ /* try to reuse an existing surface */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ /* extend before */
+ if ((dev_priv->surfaces[i].refcount == 1) &&
+ (new->flags == dev_priv->surfaces[i].flags) &&
+ (new_upper + 1 == dev_priv->surfaces[i].lower)) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->filp = filp;
+ dev_priv->surfaces[i].refcount++;
+ dev_priv->surfaces[i].lower = s->lower;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return virt_surface_index;
+ }
+
+ /* extend after */
+ if ((dev_priv->surfaces[i].refcount == 1) &&
+ (new->flags == dev_priv->surfaces[i].flags) &&
+ (new_lower == dev_priv->surfaces[i].upper + 1)) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->filp = filp;
+ dev_priv->surfaces[i].refcount++;
+ dev_priv->surfaces[i].upper = s->upper;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return virt_surface_index;
+ }
+ }
+
+ /* okay, we need a new one */
+ for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+ if (dev_priv->surfaces[i].refcount == 0) {
+ s = &(dev_priv->virt_surfaces[virt_surface_index]);
+ s->surface_index = i;
+ s->lower = new_lower;
+ s->upper = new_upper;
+ s->flags = new->flags;
+ s->filp = filp;
+ dev_priv->surfaces[i].refcount = 1;
+ dev_priv->surfaces[i].lower = s->lower;
+ dev_priv->surfaces[i].upper = s->upper;
+ dev_priv->surfaces[i].flags = s->flags;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return virt_surface_index;
+ }
+ }
+
+ /* we didn't find anything */
+ return -1;
+}
+
+static int free_surface(DRMFILE filp, drm_radeon_private_t *dev_priv, int lower)
+{
+ struct radeon_virt_surface *s;
+ int i;
+ /* find the virtual surface */
+ for(i = 0; i < 2*RADEON_MAX_SURFACES; i++) {
+ s = &(dev_priv->virt_surfaces[i]);
+ if (s->filp) {
+ if ((lower == s->lower) && (filp == s->filp)) {
+ if (dev_priv->surfaces[s->surface_index].lower == s->lower)
+ dev_priv->surfaces[s->surface_index].lower = s->upper;
+
+ if (dev_priv->surfaces[s->surface_index].upper == s->upper)
+ dev_priv->surfaces[s->surface_index].upper = s->lower;
+
+ dev_priv->surfaces[s->surface_index].refcount--;
+ if (dev_priv->surfaces[s->surface_index].refcount == 0)
+ dev_priv->surfaces[s->surface_index].flags = 0;
+ s->filp = 0;
+ radeon_apply_surface_regs(s->surface_index, dev_priv);
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+static void radeon_surfaces_release(DRMFILE filp, drm_radeon_private_t *dev_priv)
+{
+ int i;
+ for( i = 0; i < 2*RADEON_MAX_SURFACES; i++)
+ {
+ if (dev_priv->virt_surfaces[i].filp == filp)
+ free_surface(filp, dev_priv, dev_priv->virt_surfaces[i].lower);
+ }
+}
+
/* ================================================================
* IOCTL functions
*/
+int radeon_surface_alloc(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_surface_alloc_t alloc;
+
+ if (!dev_priv) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_surface_alloc_t __user *)data,
+ sizeof(alloc));
+
+ if (alloc_surface(&alloc, dev_priv, filp) == -1)
+ return DRM_ERR(EINVAL);
+ else
+ return 0;
+}
+
+int radeon_surface_free(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_surface_free_t memfree;
+
+ if (!dev_priv) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *)data,
+ sizeof(memfree) );
+
+ if (free_surface(filp, dev_priv, memfree.address))
+ return DRM_ERR(EINVAL);
+ else
+ return 0;
+}
+
int radeon_cp_clear(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
@@ -2704,6 +2900,20 @@ int radeon_cp_setparam(DRM_IOCTL_ARGS)
radeon_priv = filp_priv->driver_priv;
radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value;
break;
+ case RADEON_SETPARAM_SWITCH_TILING:
+ if (sp.value == 0) {
+ DRM_DEBUG( "color tiling disabled\n" );
+ dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+ dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+ dev_priv->sarea_priv->tiling_enabled = 0;
+ }
+ else if (sp.value == 1) {
+ DRM_DEBUG( "color tiling enabled\n" );
+ dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
+ dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
+ dev_priv->sarea_priv->tiling_enabled = 1;
+ }
+ break;
default:
DRM_DEBUG("Invalid parameter %d\n", sp.param);
return DRM_ERR(EINVAL);
@@ -2715,6 +2925,7 @@ int radeon_cp_setparam(DRM_IOCTL_ARGS)
/* When a client dies:
* - Check for and clean up flipped page state
* - Free any alloced GART memory.
+ * - Free any alloced radeon surfaces.
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
@@ -2727,6 +2938,7 @@ void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp)
}
radeon_mem_release(filp, dev_priv->gart_heap);
radeon_mem_release(filp, dev_priv->fb_heap);
+ radeon_surfaces_release(filp, dev_priv);
}
}
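For completeness, a hedged sketch of how a 2D driver might flip the new RADEON_SETPARAM_SWITCH_TILING parameter from userspace, assuming the drm_radeon_setparam_t layout already declared in radeon_drm.h. The drm only toggles RADEON_DST_TILE_MACRO in the front/back pitch_offset values and mirrors the state into sarea->tiling_enabled; 3D clients are expected to read that flag from the SAREA.

#include <stdint.h>
#include "xf86drm.h"
#include "radeon_drm.h"

/* Illustrative only, not part of the patch: enable or disable color
 * tiling through the new setparam value. */
static int radeon_switch_tiling(int fd, int enable)
{
	drm_radeon_setparam_t sp;

	sp.param = RADEON_SETPARAM_SWITCH_TILING;
	sp.value = enable ? 1 : 0;

	return drmCommandWrite(fd, DRM_RADEON_SETPARAM, &sp, sizeof(sp));
}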