Diffstat (limited to 'shared')
-rw-r--r--  shared/radeon.h        82
-rw-r--r--  shared/radeon_cp.c      3
-rw-r--r--  shared/radeon_drm.h    59
-rw-r--r--  shared/radeon_drv.h    43
-rw-r--r--  shared/radeon_irq.c   214
-rw-r--r--  shared/radeon_mem.c   334
-rw-r--r--  shared/radeon_state.c  70
7 files changed, 785 insertions, 20 deletions
diff --git a/shared/radeon.h b/shared/radeon.h
index 885e4297..4cb00986 100644
--- a/shared/radeon.h
+++ b/shared/radeon.h
@@ -48,10 +48,10 @@
#define DRIVER_NAME "radeon"
#define DRIVER_DESC "ATI Radeon"
-#define DRIVER_DATE "20020611"
+#define DRIVER_DATE "20020828"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 5
+#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
/* Interface history:
@@ -68,6 +68,11 @@
* 1.5 - Add r200 packets to cmdbuf ioctl
* - Add r200 function to init ioctl
* - Add 'scalar2' instruction to cmdbuf
+ * 1.6 - Add static agp memory manager
+ * Add irq handler (won't be turned on unless the X server knows to use it)
+ * Add irq ioctls and irq_active getparam.
+ * Add wait command for cmdbuf ioctl
+ * Add agp offset query for getparam
*/
#define DRIVER_IOCTLS \
[DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { radeon_cp_buffers, 1, 0 }, \
@@ -88,9 +93,18 @@
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX2)] = { radeon_cp_vertex2, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CMDBUF)] = { radeon_cp_cmdbuf, 1, 0 }, \
[DRM_IOCTL_NR(DRM_IOCTL_RADEON_GETPARAM)] = { radeon_cp_getparam, 1, 0 }, \
- [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FLIP)] = { radeon_cp_flip, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FLIP)] = { radeon_cp_flip, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_ALLOC)] = { radeon_mem_alloc, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_FREE)] = { radeon_mem_free, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_INIT_HEAP)] = { radeon_mem_init_heap, 1, 1 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_EMIT)] = { radeon_irq_emit, 1, 0 }, \
+ [DRM_IOCTL_NR(DRM_IOCTL_RADEON_IRQ_WAIT)] = { radeon_irq_wait, 1, 0 },
-/* Driver customization:
+/* When a client dies:
+ * - Check for and clean up flipped page state
+ * - Free any allocated agp memory.
+ *
+ * DRM infrastructure takes care of reclaiming dma buffers.
*/
#define DRIVER_PRERELEASE() do { \
if ( dev->dev_private ) { \
@@ -98,22 +112,78 @@
if ( dev_priv->page_flipping ) { \
radeon_do_cleanup_pageflip( dev ); \
} \
+ radeon_mem_release( dev_priv->agp_heap ); \
} \
} while (0)
+/* On unloading the module:
+ * - Free memory heap structure
+ * - Remove mappings made at startup and free dev_private.
+ */
#define DRIVER_PRETAKEDOWN() do { \
- if ( dev->dev_private ) radeon_do_cleanup_cp( dev ); \
+ if ( dev->dev_private ) { \
+ drm_radeon_private_t *dev_priv = dev->dev_private; \
+ radeon_mem_takedown( &(dev_priv->agp_heap) ); \
+ radeon_do_cleanup_cp( dev ); \
+ } \
} while (0)
/* DMA customization:
*/
#define __HAVE_DMA 1
+
+#define __HAVE_DMA_IRQ 1
+#define __HAVE_DMA_IRQ_BH 1
+#define __HAVE_SHARED_IRQ 1
+
+#define DRIVER_PREINSTALL() do { \
+ drm_radeon_private_t *dev_priv = \
+ (drm_radeon_private_t *)dev->dev_private; \
+ u32 tmp; \
+ \
+ /* Clear bit if it's already high: */ \
+ tmp = RADEON_READ( RADEON_GEN_INT_STATUS ); \
+ tmp = tmp & RADEON_SW_INT_TEST_ACK; \
+ RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp ); \
+ \
+ /* Disable *all* interrupts */ \
+ RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 ); \
+} while (0)
+
+#ifdef __linux__
+#define IWH(x) init_waitqueue_head(x)
+#else
+#define IWH(x)
+#endif
+
+#define DRIVER_POSTINSTALL() do { \
+ drm_radeon_private_t *dev_priv = \
+ (drm_radeon_private_t *)dev->dev_private; \
+ \
+ atomic_set(&dev_priv->irq_received, 0); \
+ atomic_set(&dev_priv->irq_emitted, 0); \
+ IWH(&dev_priv->irq_queue); \
+ \
+ /* Turn on SW_INT only */ \
+ RADEON_WRITE( RADEON_GEN_INT_CNTL, \
+ RADEON_SW_INT_ENABLE ); \
+} while (0)
+
+#define DRIVER_UNINSTALL() do { \
+ drm_radeon_private_t *dev_priv = \
+ (drm_radeon_private_t *)dev->dev_private; \
+ if ( dev_priv ) { \
+ /* Disable *all* interrupts */ \
+ RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 ); \
+ } \
+} while (0)
+
/* Buffer customization:
*/
#define DRIVER_BUF_PRIV_T drm_radeon_buf_priv_t
-#define DRIVER_AGP_BUFFERS_MAP( dev ) \
+#define DRIVER_AGP_BUFFERS_MAP( dev ) \
((drm_radeon_private_t *)((dev)->dev_private))->buffers
#endif
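
The DRIVER_PREINSTALL / POSTINSTALL / UNINSTALL macros above encode the whole
interrupt bring-up sequence: acknowledge any software interrupt that is already
latched (the GEN_INT_STATUS ack bit clears when written back as 1, which is
what "Clear bit if it's already high" relies on), mask every source, install
the handler, then enable only RADEON_SW_INT_ENABLE once the counters and wait
queue are initialised.  For reference, the same sequence written out as plain
functions; this is a minimal sketch, not part of the patch, assuming the
RADEON_READ / RADEON_WRITE macros and drm_radeon_private_t from radeon_drv.h:

static void radeon_irq_preinstall_sketch( drm_device_t *dev )
{
        drm_radeon_private_t *dev_priv =
                (drm_radeon_private_t *)dev->dev_private;
        u32 tmp;

        /* Ack a software interrupt that may already be pending, so a
         * stale status bit can't fire the moment the irq is enabled.
         */
        tmp = RADEON_READ( RADEON_GEN_INT_STATUS ) & RADEON_SW_INT_TEST_ACK;
        RADEON_WRITE( RADEON_GEN_INT_STATUS, tmp );

        /* Mask everything until the handler is in place */
        RADEON_WRITE( RADEON_GEN_INT_CNTL, 0 );
}

static void radeon_irq_postinstall_sketch( drm_device_t *dev )
{
        drm_radeon_private_t *dev_priv =
                (drm_radeon_private_t *)dev->dev_private;

        atomic_set( &dev_priv->irq_received, 0 );
        atomic_set( &dev_priv->irq_emitted, 0 );
        /* (the real macro also initialises dev_priv->irq_queue here) */

        /* Only the software interrupt is used by this interface */
        RADEON_WRITE( RADEON_GEN_INT_CNTL, RADEON_SW_INT_ENABLE );
}
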
diff --git a/shared/radeon_cp.c b/shared/radeon_cp.c
index 86ea01bc..c0fb63ef 100644
--- a/shared/radeon_cp.c
+++ b/shared/radeon_cp.c
@@ -1412,6 +1412,9 @@ int radeon_cp_idle( DRM_IOCTL_ARGS )
LOCK_TEST_WITH_RETURN( dev );
+/* if (dev->irq) */
+/* radeon_emit_and_wait_irq( dev ); */
+
return radeon_do_cp_idle( dev_priv );
}
diff --git a/shared/radeon_drm.h b/shared/radeon_drm.h
index 6469bfb8..9a747e04 100644
--- a/shared/radeon_drm.h
+++ b/shared/radeon_drm.h
@@ -142,6 +142,9 @@
#define RADEON_CMD_PACKET3 5 /* emit hw packet */
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
+#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
+ * doesn't make the cpu wait, just
+ * the graphics hardware */
typedef union {
@@ -161,8 +164,14 @@ typedef union {
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
+ struct {
+ unsigned char cmd_type, flags, pad0, pad1;
+ } wait;
} drm_radeon_cmd_header_t;
+#define RADEON_WAIT_2D 0x1
+#define RADEON_WAIT_3D 0x2
+
#define RADEON_FRONT 0x1
#define RADEON_BACK 0x2
@@ -364,6 +373,11 @@ typedef struct {
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( 0x50, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(0x51, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( 0x52)
+#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR( 0x53, drm_radeon_mem_alloc_t)
+#define DRM_IOCTL_RADEON_FREE DRM_IOW( 0x54, drm_radeon_mem_free_t)
+#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( 0x55, drm_radeon_mem_init_heap_t)
+#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR( 0x56, drm_radeon_irq_emit_t)
+#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( 0x57, drm_radeon_irq_wait_t)
typedef struct drm_radeon_init {
enum {
@@ -499,14 +513,51 @@ typedef struct drm_radeon_indirect {
/* 1.3: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
-#define RADEON_PARAM_AGP_BUFFER_OFFSET 0x1
-#define RADEON_PARAM_LAST_FRAME 0x2
-#define RADEON_PARAM_LAST_DISPATCH 0x3
-#define RADEON_PARAM_LAST_CLEAR 0x4
+#define RADEON_PARAM_AGP_BUFFER_OFFSET 1 /* card offset of 1st agp buffer */
+#define RADEON_PARAM_LAST_FRAME 2
+#define RADEON_PARAM_LAST_DISPATCH 3
+#define RADEON_PARAM_LAST_CLEAR 4
+#define RADEON_PARAM_IRQ_ACTIVE 5
+#define RADEON_PARAM_AGP_BASE 6 /* card offset of agp base */
typedef struct drm_radeon_getparam {
int param;
int *value;
} drm_radeon_getparam_t;
+/* 1.6: Set up a memory manager for regions of shared memory:
+ */
+#define RADEON_MEM_REGION_AGP 1
+#define RADEON_MEM_REGION_FB 2
+
+typedef struct drm_radeon_mem_alloc {
+ int region;
+ int alignment;
+ int size;
+ int *region_offset; /* offset from start of fb or agp */
+} drm_radeon_mem_alloc_t;
+
+typedef struct drm_radeon_mem_free {
+ int region;
+ int region_offset;
+} drm_radeon_mem_free_t;
+
+typedef struct drm_radeon_mem_init_heap {
+ int region;
+ int size;
+ int start;
+} drm_radeon_mem_init_heap_t;
+
+
+/* 1.6: Userspace can request & wait on irq's:
+ */
+typedef struct drm_radeon_irq_emit {
+ int *irq_seq;
+} drm_radeon_irq_emit_t;
+
+typedef struct drm_radeon_irq_wait {
+ int irq_seq;
+} drm_radeon_irq_wait_t;
+
+
#endif
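
The structs above are the whole userspace ABI for the 1.6 memory manager and
irq interface.  Two details worth noting: 'alignment' in drm_radeon_mem_alloc_t
is a log2 value (radeon_mem.c clamps it to at least 12, i.e. 4k alignment), and
DRM_IOCTL_RADEON_INIT_HEAP is registered with both flags set in the
DRIVER_IOCTLS table, so only an authenticated, privileged client (in practice
the X server) sets up a heap.  A minimal, illustrative userspace sketch of
alloc/free, not part of the patch, assuming an open and authenticated DRM fd:

#include <sys/ioctl.h>
#include "drm.h"
#include "radeon_drm.h"

static int radeon_alloc_agp( int fd, int size, int *offset )
{
        drm_radeon_mem_alloc_t alloc;

        alloc.region = RADEON_MEM_REGION_AGP;
        alloc.alignment = 12;           /* log2, i.e. 4k-aligned */
        alloc.size = size;
        alloc.region_offset = offset;   /* kernel writes the offset here */

        return ioctl( fd, DRM_IOCTL_RADEON_ALLOC, &alloc );
}

static int radeon_free_agp( int fd, int offset )
{
        drm_radeon_mem_free_t memfree;

        memfree.region = RADEON_MEM_REGION_AGP;
        memfree.region_offset = offset;

        return ioctl( fd, DRM_IOCTL_RADEON_FREE, &memfree );
}

The returned offset lies inside whatever range the server passed to
DRM_IOCTL_RADEON_INIT_HEAP; what it is relative to (fb or agp aperture) is a
convention between the server and its clients.
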
diff --git a/shared/radeon_drv.h b/shared/radeon_drv.h
index e012bbbd..efe3020c 100644
--- a/shared/radeon_drv.h
+++ b/shared/radeon_drv.h
@@ -61,6 +61,15 @@ typedef struct drm_radeon_depth_clear_t {
u32 se_cntl;
} drm_radeon_depth_clear_t;
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ int start;
+ int size;
+ int pid; /* 0: free, -1: heap, other: real pids */
+};
+
typedef struct drm_radeon_private {
drm_radeon_ring_buffer_t ring;
drm_radeon_sarea_t *sarea_priv;
@@ -126,6 +135,14 @@ typedef struct drm_radeon_private {
drm_map_t *ring_rptr;
drm_map_t *buffers;
drm_map_t *agp_textures;
+
+ struct mem_block *agp_heap;
+ struct mem_block *fb_heap;
+
+ wait_queue_head_t irq_queue;
+ atomic_t irq_received;
+ atomic_t irq_emitted;
+
} drm_radeon_private_t;
typedef struct drm_radeon_buf_priv {
@@ -164,6 +181,20 @@ extern int radeon_cp_cmdbuf( DRM_IOCTL_ARGS );
extern int radeon_cp_getparam( DRM_IOCTL_ARGS );
extern int radeon_cp_flip( DRM_IOCTL_ARGS );
+extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
+extern int radeon_mem_free( DRM_IOCTL_ARGS );
+extern int radeon_mem_init_heap( DRM_IOCTL_ARGS );
+extern void radeon_mem_takedown( struct mem_block **heap );
+extern void radeon_mem_release( struct mem_block *heap );
+
+extern int radeon_irq_emit( DRM_IOCTL_ARGS );
+extern int radeon_irq_wait( DRM_IOCTL_ARGS );
+
+extern int radeon_emit_and_wait_irq(drm_device_t *dev);
+extern int radeon_wait_irq(drm_device_t *dev, int irq_nr);
+extern int radeon_emit_irq(drm_device_t *dev);
+
+
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
@@ -238,6 +269,16 @@ extern int radeon_cp_flip( DRM_IOCTL_ARGS );
? DRM_READ32( &dev_priv->scratch[(x)] ) \
: RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
+
+#define RADEON_GEN_INT_CNTL 0x0040
+# define RADEON_GUI_IDLE_INT_ENABLE (1 << 19)
+# define RADEON_SW_INT_ENABLE (1 << 25)
+
+#define RADEON_GEN_INT_STATUS 0x0044
+# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
+# define RADEON_SW_INT_TEST_ACK (1 << 25)
+# define RADEON_SW_INT_FIRE (1 << 26)
+
#define RADEON_HOST_PATH_CNTL 0x0130
# define RADEON_HDP_SOFT_RESET (1 << 26)
# define RADEON_HDP_WC_TIMEOUT_MASK (7 << 28)
@@ -526,6 +567,8 @@ extern int radeon_cp_flip( DRM_IOCTL_ARGS );
#define RADEON_TXFORMAT_ARGB4444 5
#define RADEON_TXFORMAT_ARGB8888 6
#define RADEON_TXFORMAT_RGBA8888 7
+#define RADEON_TXFORMAT_VYUY422 10
+#define RADEON_TXFORMAT_YVYU422 11
#define R200_PP_TXCBLEND_0 0x2f00
#define R200_PP_TXCBLEND_1 0x2f10
diff --git a/shared/radeon_irq.c b/shared/radeon_irq.c
new file mode 100644
index 00000000..11609393
--- /dev/null
+++ b/shared/radeon_irq.c
@@ -0,0 +1,214 @@
+/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*-
+ *
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "radeon.h"
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+/* Interrupts - Used for device synchronization and flushing in the
+ * following circumstances:
+ *
+ * - Exclusive FB access with hw idle:
+ * - Wait for GUI Idle (?) interrupt, then do normal flush.
+ *
+ * - Frame throttling, NV_fence:
+ * - Drop marker irq's into command stream ahead of time.
+ * - Wait on irq's with lock *not held*
+ * - Check each for termination condition
+ *
+ * - Internally in cp_getbuffer, etc:
+ * - as above, but wait with lock held???
+ *
+ * NOTE: These functions are misleadingly named -- the irq's aren't
+ * tied to dma at all, this is just a hangover from dri prehistory.
+ */
+
+void DRM(dma_service)( DRM_IRQ_ARGS )
+{
+ drm_device_t *dev = (drm_device_t *) arg;
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *)dev->dev_private;
+ u32 temp;
+
+ /* Need to wait for fifo to drain?
+ */
+ temp = RADEON_READ(RADEON_GEN_INT_STATUS);
+ temp = temp & RADEON_SW_INT_TEST_ACK;
+ if (temp == 0) return;
+ RADEON_WRITE(RADEON_GEN_INT_STATUS, temp);
+
+ atomic_inc(&dev_priv->irq_received);
+#ifdef __linux__
+ queue_task(&dev->tq, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+#endif /* __linux__ */
+#ifdef __FreeBSD__
+ taskqueue_enqueue(taskqueue_swi, &dev->task);
+#endif /* __FreeBSD__ */
+}
+
+void DRM(dma_immediate_bh)( DRM_TASKQUEUE_ARGS )
+{
+ drm_device_t *dev = (drm_device_t *) arg;
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *)dev->dev_private;
+
+#ifdef __linux__
+ wake_up_interruptible(&dev_priv->irq_queue);
+#endif /* __linux__ */
+#ifdef __FreeBSD__
+ wakeup( &dev_priv->irq_queue );
+#endif
+}
+
+
+int radeon_emit_irq(drm_device_t *dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ atomic_inc(&dev_priv->irq_emitted);
+
+ BEGIN_RING(2);
+ OUT_RING( CP_PACKET0( RADEON_GEN_INT_STATUS, 0 ) );
+ OUT_RING( RADEON_SW_INT_FIRE );
+ ADVANCE_RING();
+ COMMIT_RING();
+
+ return atomic_read(&dev_priv->irq_emitted);
+}
+
+
+int radeon_wait_irq(drm_device_t *dev, int irq_nr)
+{
+ drm_radeon_private_t *dev_priv =
+ (drm_radeon_private_t *)dev->dev_private;
+#ifdef __linux__
+ DECLARE_WAITQUEUE(entry, current);
+ unsigned long end = jiffies + HZ*3;
+#endif /* __linux__ */
+ int ret = 0;
+
+ if (atomic_read(&dev_priv->irq_received) >= irq_nr)
+ return 0;
+
+ dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+#ifdef __linux__
+ add_wait_queue(&dev_priv->irq_queue, &entry);
+
+ for (;;) {
+ current->state = TASK_INTERRUPTIBLE;
+ if (atomic_read(&dev_priv->irq_received) >= irq_nr)
+ break;
+ if((signed)(end - jiffies) <= 0) {
+ ret = -EBUSY; /* Lockup? Missed irq? */
+ break;
+ }
+ schedule_timeout(HZ*3);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&dev_priv->irq_queue, &entry);
+ return ret;
+#endif /* __linux__ */
+
+#ifdef __FreeBSD__
+ ret = tsleep( &dev_priv->irq_queue, PZERO | PCATCH, \
+ "rdnirq", 3*hz );
+ if ( (ret == EWOULDBLOCK) || (ret == EINTR) )
+ return DRM_ERR(EBUSY);
+ return ret;
+#endif /* __FreeBSD__ */
+}
+
+
+int radeon_emit_and_wait_irq(drm_device_t *dev)
+{
+ return radeon_wait_irq( dev, radeon_emit_irq(dev) );
+}
+
+
+/* Needs the lock as it touches the ring.
+ */
+int radeon_irq_emit( DRM_IOCTL_ARGS )
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_irq_emit_t emit;
+ int result;
+
+ LOCK_TEST_WITH_RETURN( dev );
+
+ if ( !dev_priv ) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL( emit, (drm_radeon_irq_emit_t *)data,
+ sizeof(emit) );
+
+ result = radeon_emit_irq( dev );
+
+ if ( DRM_COPY_TO_USER( emit.irq_seq, &result, sizeof(int) ) ) {
+ DRM_ERROR( "copy_to_user\n" );
+ return DRM_ERR(EFAULT);
+ }
+
+ return 0;
+}
+
+
+/* Doesn't need the hardware lock.
+ */
+int radeon_irq_wait( DRM_IOCTL_ARGS )
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_irq_wait_t irqwait;
+
+ if ( !dev_priv ) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL( irqwait, (drm_radeon_irq_wait_t *)data,
+ sizeof(irqwait) );
+
+ return radeon_wait_irq( dev, irqwait.irq_seq );
+}
+
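
radeon_emit_irq() returns the value of irq_emitted after queueing the
RADEON_SW_INT_FIRE packet, so every marker gets a monotonically increasing
sequence number, and radeon_wait_irq() sleeps until irq_received has caught up
with that number, giving up after roughly three seconds.  This is the frame
throttling pattern from the comment at the top of the file: drop a marker into
the command stream, keep working, wait on it later with the lock not held.  An
illustrative userspace sketch, not part of the patch (helper names are made up,
includes as in the earlier sketch):

static int radeon_emit_irq_seq( int fd, int *seq )
{
        drm_radeon_irq_emit_t emit;

        emit.irq_seq = seq;     /* kernel writes the sequence number here */
        return ioctl( fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit );
}

static int radeon_wait_irq_seq( int fd, int seq )
{
        drm_radeon_irq_wait_t irqwait;

        irqwait.irq_seq = seq;
        return ioctl( fd, DRM_IOCTL_RADEON_IRQ_WAIT, &irqwait );
}

Typical use: after submitting frame N, emit a marker and remember its sequence
number; before starting frame N+2, wait on frame N's marker so the client never
runs more than a couple of frames ahead of the hardware.  Note the asymmetry in
the handlers above: IRQ_EMIT takes LOCK_TEST_WITH_RETURN because it touches the
ring, while IRQ_WAIT is meant to be called without the hardware lock.
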
diff --git a/shared/radeon_mem.c b/shared/radeon_mem.c
new file mode 100644
index 00000000..d77c60b9
--- /dev/null
+++ b/shared/radeon_mem.c
@@ -0,0 +1,334 @@
+/* radeon_mem.c -- Simple agp/fb memory manager for radeon -*- linux-c -*-
+ *
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "radeon.h"
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+/* Very simple allocator for agp memory, working on a static range
+ * already mapped into each client's address space.
+ */
+
+static struct mem_block *split_block(struct mem_block *p, int start, int size,
+ int pid )
+{
+ /* Maybe cut off the start of an existing block */
+ if (start > p->start) {
+ struct mem_block *newblock = DRM_MALLOC(sizeof(*newblock));
+ if (!newblock)
+ goto out;
+ newblock->start = start;
+ newblock->size = p->size - (start - p->start);
+ newblock->pid = 0;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* Maybe cut off the end of an existing block */
+ if (size < p->size) {
+ struct mem_block *newblock = DRM_MALLOC(sizeof(*newblock));
+ if (!newblock)
+ goto out;
+ newblock->start = start + size;
+ newblock->size = p->size - size;
+ newblock->pid = 0;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size = size;
+ }
+
+ out:
+ /* Our block is in the middle */
+ p->pid = pid;
+ return p;
+}
+
+static struct mem_block *alloc_block( struct mem_block *heap, int size,
+ int align2, int pid )
+{
+ struct mem_block *p;
+ int mask = (1 << align2)-1;
+
+ for (p = heap->next ; p != heap ; p = p->next) {
+ int start = (p->start + mask) & ~mask;
+ if (p->pid == 0 && start + size <= p->start + p->size)
+ return split_block( p, start, size, pid );
+ }
+
+ return NULL;
+}
+
+static struct mem_block *find_block( struct mem_block *heap, int start )
+{
+ struct mem_block *p;
+
+ for (p = heap->next ; p != heap ; p = p->next)
+ if (p->start == start)
+ return p;
+
+ return NULL;
+}
+
+
+static void free_block( struct mem_block *p )
+{
+ p->pid = 0;
+
+ /* Assumes a single contiguous range. Needs a special pid in
+ * 'heap' to stop it being subsumed.
+ */
+ if (p->next->pid == 0) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ DRM_FREE(q);	/* free the absorbed block, not p itself */
+ }
+
+ if (p->prev->pid == 0) {
+ struct mem_block *q = p->prev;
+ q->size += p->size;
+ q->next = p->next;
+ q->next->prev = q;
+ DRM_FREE(p);
+ }
+}
+
+static void print_heap( struct mem_block *heap )
+{
+ struct mem_block *p;
+
+ for (p = heap->next ; p != heap ; p = p->next)
+ DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n",
+ p->start, p->start + p->size,
+ p->size, p->pid);
+}
+
+/* Initialize. How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, int start, int size)
+{
+ struct mem_block *blocks = DRM_MALLOC(sizeof(*blocks));
+
+ if (!blocks)
+ return -ENOMEM;
+
+ *heap = DRM_MALLOC(sizeof(**heap));
+ if (!*heap) {
+ DRM_FREE( blocks );
+ return -ENOMEM;
+ }
+
+ blocks->start = start;
+ blocks->size = size;
+ blocks->pid = 0;
+ blocks->next = blocks->prev = *heap;
+
+ memset( *heap, 0, sizeof(**heap) );
+ (*heap)->pid = -1;
+ (*heap)->next = (*heap)->prev = blocks;
+ return 0;
+}
+
+
+/* Free all blocks associated with the releasing pid.
+ */
+void radeon_mem_release( struct mem_block *heap )
+{
+ int pid = DRM_CURRENTPID;
+ struct mem_block *p;
+
+ if (!heap || !heap->next)
+ return;
+
+ for (p = heap->next ; p != heap ; p = p->next) {
+ if (p->pid == pid)
+ p->pid = 0;
+ }
+
+ /* Assumes a single contiguous range. Needs a special pid in
+ * 'heap' to stop it being subsumed.
+ */
+ for (p = heap->next ; p != heap ; p = p->next) {
+ while (p->pid == 0 && p->next->pid == 0) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ DRM_FREE(q);
+ }
+ }
+}
+
+/* Shutdown.
+ */
+void radeon_mem_takedown( struct mem_block **heap )
+{
+ struct mem_block *p;
+
+ if (!*heap)
+ return;
+
+ for (p = (*heap)->next ; p != *heap ; ) {
+ struct mem_block *q = p;
+ p = p->next;
+ DRM_FREE(q);
+ }
+
+ DRM_FREE( *heap );
+ *heap = 0;
+}
+
+
+
+/* IOCTL HANDLERS */
+
+static struct mem_block **get_heap( drm_radeon_private_t *dev_priv,
+ int region )
+{
+ switch( region ) {
+ case RADEON_MEM_REGION_AGP:
+ return &dev_priv->agp_heap;
+ case RADEON_MEM_REGION_FB:
+ return &dev_priv->fb_heap;
+ default:
+ return 0;
+ }
+}
+
+int radeon_mem_alloc( DRM_IOCTL_ARGS )
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_mem_alloc_t alloc;
+ struct mem_block *block, **heap;
+
+ if ( !dev_priv ) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t *)data,
+ sizeof(alloc) );
+
+ heap = get_heap( dev_priv, alloc.region );
+ if (!heap || !*heap)
+ return DRM_ERR(EFAULT);
+
+ /* Make things easier on ourselves: all allocations at least
+ * 4k aligned.
+ */
+ if (alloc.alignment < 12)
+ alloc.alignment = 12;
+
+ block = alloc_block( *heap, alloc.size, alloc.alignment,
+ DRM_CURRENTPID );
+
+ if (!block)
+ return DRM_ERR(ENOMEM);
+
+ if ( DRM_COPY_TO_USER( alloc.region_offset, &block->start,
+ sizeof(int) ) ) {
+ DRM_ERROR( "copy_to_user\n" );
+ return DRM_ERR(EFAULT);
+ }
+
+ return 0;
+}
+
+
+
+int radeon_mem_free( DRM_IOCTL_ARGS )
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_mem_free_t memfree;
+ struct mem_block *block, **heap;
+
+ if ( !dev_priv ) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data,
+ sizeof(memfree) );
+
+ heap = get_heap( dev_priv, memfree.region );
+ if (!heap || !*heap)
+ return DRM_ERR(EFAULT);
+
+ block = find_block( *heap, memfree.region_offset );
+ if (!block)
+ return DRM_ERR(EFAULT);
+
+ if (block->pid != DRM_CURRENTPID)
+ return DRM_ERR(EPERM);
+
+ free_block( block );
+ return 0;
+}
+
+int radeon_mem_init_heap( DRM_IOCTL_ARGS )
+{
+ DRM_DEVICE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ drm_radeon_mem_init_heap_t initheap;
+ struct mem_block **heap;
+
+ if ( !dev_priv ) {
+ DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data,
+ sizeof(initheap) );
+
+ heap = get_heap( dev_priv, initheap.region );
+ if (!heap)
+ return DRM_ERR(EFAULT);
+
+ if (*heap) {
+ DRM_ERROR("heap already initialized?");
+ return DRM_ERR(EFAULT);
+ }
+
+ return init_heap( heap, initheap.start, initheap.size );
+}
+
+
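
The allocator above keeps one circular doubly-linked list per heap: init_heap()
creates a sentinel node with pid == -1 that the list walks use as their end
marker, a single free block covering the whole range hangs off it, split_block()
carves aligned allocations (tagged with the caller's pid) out of a free block
(pid == 0), and free_block() / radeon_mem_release() merge neighbouring free
blocks again so the list never accumulates adjacent free entries.  A small
debug-style sketch of the invariants this relies on; the function name is made
up and it is not part of the patch:

static int radeon_mem_check_heap( struct mem_block *heap )
{
        struct mem_block *p;

        if ( !heap || heap->pid != -1 )
                return -1;              /* not an initialised heap */

        for ( p = heap->next ; p != heap ; p = p->next ) {
                if ( p->prev->next != p || p->next->prev != p )
                        return -1;      /* broken links */
                if ( p->next != heap &&
                     p->start + p->size != p->next->start )
                        return -1;      /* gap or overlap between blocks */
                if ( p->pid == 0 && p->next != heap && p->next->pid == 0 )
                        return -1;      /* adjacent free blocks not merged */
        }
        return 0;
}
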
diff --git a/shared/radeon_state.c b/shared/radeon_state.c
index 6dbf6093..0172128b 100644
--- a/shared/radeon_state.c
+++ b/shared/radeon_state.c
@@ -719,7 +719,6 @@ static void radeon_cp_dispatch_swap( drm_device_t *dev )
RING_LOCALS;
DRM_DEBUG( "\n" );
-
/* Do some trivial performance monitoring...
*/
if (dev_priv->do_boxes)
@@ -1088,6 +1087,8 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
case RADEON_TXFORMAT_ARGB1555:
case RADEON_TXFORMAT_RGB565:
case RADEON_TXFORMAT_ARGB4444:
+ case RADEON_TXFORMAT_VYUY422:
+ case RADEON_TXFORMAT_YVYU422:
format = RADEON_COLOR_FORMAT_RGB565;
tex_width = tex->width * 2;
blit_width = image->width * 2;
@@ -1226,6 +1227,7 @@ static int radeon_cp_dispatch_texture( drm_device_t *dev,
return ret;
}
+
static void radeon_cp_dispatch_stipple( drm_device_t *dev, u32 *stipple )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1306,6 +1308,9 @@ static int radeon_do_init_pageflip( drm_device_t *dev )
return 0;
}
+/* Called whenever a client dies, from DRM(release).
+ * NOTE: Lock isn't necessarily held when this is called!
+ */
int radeon_do_cleanup_pageflip( drm_device_t *dev )
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1936,14 +1941,18 @@ static int radeon_emit_packet3_cliprect( drm_device_t *dev,
if ( i < cmdbuf->nbox ) {
if (DRM_COPY_FROM_USER_UNCHECKED( &box, &boxes[i], sizeof(box) ))
return DRM_ERR(EFAULT);
- /* FIXME The second and subsequent times round this loop, send a
- * WAIT_UNTIL_3D_IDLE before calling emit_clip_rect(). This
- * fixes a lockup on fast machines when sending several
- * cliprects with a cmdbuf, as when waving a 2D window over
- * a 3D window. Something in the commands from user space
- * seems to hang the card when they're sent several times
- * in a row. That would be the correct place to fix it but
- * this works around it until I can figure that out - Tim Smith */
+ /* FIXME The second and subsequent times round
+ * this loop, send a WAIT_UNTIL_3D_IDLE before
+ * calling emit_clip_rect(). This fixes a
+ * lockup on fast machines when sending
+ * several cliprects with a cmdbuf, as when
+ * waving a 2D window over a 3D
+ * window. Something in the commands from user
+ * space seems to hang the card when they're
+ * sent several times in a row. That would be
+ * the correct place to fix it but this works
+ * around it until I can figure that out - Tim
+ * Smith */
if ( i ) {
BEGIN_RING( 2 );
RADEON_WAIT_UNTIL_3D_IDLE();
@@ -1967,6 +1976,34 @@ static int radeon_emit_packet3_cliprect( drm_device_t *dev,
}
+static int radeon_emit_wait( drm_device_t *dev, int flags )
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
+ switch (flags) {
+ case RADEON_WAIT_2D:
+ BEGIN_RING( 2 );
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ ADVANCE_RING();
+ break;
+ case RADEON_WAIT_3D:
+ BEGIN_RING( 2 );
+ RADEON_WAIT_UNTIL_3D_IDLE();
+ ADVANCE_RING();
+ break;
+ case RADEON_WAIT_2D|RADEON_WAIT_3D:
+ BEGIN_RING( 2 );
+ RADEON_WAIT_UNTIL_IDLE();
+ ADVANCE_RING();
+ break;
+ default:
+ return DRM_ERR(EINVAL);
+ }
+
+ return 0;
+}
int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
{
@@ -1989,7 +2026,6 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
DRM_COPY_FROM_USER_IOCTL( cmdbuf, (drm_radeon_cmd_buffer_t *)data,
sizeof(cmdbuf) );
- DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );
RING_SPACE_TEST_WITH_RETURN( dev_priv );
VB_AGE_TEST_WITH_RETURN( dev_priv );
@@ -2080,6 +2116,14 @@ int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
return DRM_ERR(EINVAL);
}
break;
+
+ case RADEON_CMD_WAIT:
+ DRM_DEBUG("RADEON_CMD_WAIT\n");
+ if (radeon_emit_wait( dev, header.wait.flags )) {
+ DRM_ERROR("radeon_emit_wait failed\n");
+ return DRM_ERR(EINVAL);
+ }
+ break;
default:
DRM_ERROR("bad cmd_type %d at %p\n",
header.header.cmd_type,
@@ -2128,6 +2172,12 @@ int radeon_cp_getparam( DRM_IOCTL_ARGS )
dev_priv->stats.last_clear_reads++;
value = GET_SCRATCH( 2 );
break;
+ case RADEON_PARAM_IRQ_ACTIVE:
+ value = dev->irq ? 1 : 0;
+ break;
+ case RADEON_PARAM_AGP_BASE:
+ value = dev_priv->agp_vm_start;
+ break;
default:
return DRM_ERR(EINVAL);
}
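
The two new getparam cases close the loop for userspace: RADEON_PARAM_IRQ_ACTIVE
reports whether the DRM actually has an interrupt installed (dev->irq), so a
client can fall back to polling when it does not, and RADEON_PARAM_AGP_BASE
exposes the card offset of the agp aperture (agp_vm_start).  An illustrative
probe, not part of the patch, with the same includes and made-up helper naming
as the earlier sketches:

static int radeon_get_param( int fd, int param, int *value )
{
        drm_radeon_getparam_t gp;

        gp.param = param;
        gp.value = value;               /* kernel writes the result here */

        return ioctl( fd, DRM_IOCTL_RADEON_GETPARAM, &gp );
}

/* e.g.:
 *      int irq_active = 0, agp_base = 0;
 *      radeon_get_param( fd, RADEON_PARAM_IRQ_ACTIVE, &irq_active );
 *      radeon_get_param( fd, RADEON_PARAM_AGP_BASE, &agp_base );
 *      (take the irq emit/wait path only when irq_active != 0)
 */

Clients should also check the driver version first (DRIVER_MINOR >= 6 in
radeon.h above) before issuing any of the new ioctls, since an older DRM has no
handler registered for them.
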