Diffstat (limited to 'shared-core')
-rw-r--r--   shared-core/drm.h         98
-rw-r--r--   shared-core/i915_dma.c   118
-rw-r--r--   shared-core/i915_drm.h   158
-rw-r--r--   shared-core/i915_drv.h   239
-rw-r--r--   shared-core/i915_init.c  278
-rw-r--r--   shared-core/i915_irq.c   108
-rw-r--r--   shared-core/radeon_cp.c   26
-rw-r--r--   shared-core/radeon_drv.h   4
8 files changed, 778 insertions, 251 deletions
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 40653b4b..59cbbdd9 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -993,6 +993,93 @@ struct drm_mm_info_arg {
uint64_t p_size;
};
+struct drm_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /** Returned pointer the data was mapped at */
+ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+ /** Returned global name */
+ uint32_t name;
+};
+
+struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+ /** Returned handle for the object */
+ uint32_t handle;
+ /** Returned size of the object */
+ uint64_t size;
+};
+
+struct drm_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+ /** New read domains */
+ uint32_t read_domains;
+ /** New write domain */
+ uint32_t write_domain;
+};
+#define DRM_GEM_DOMAIN_CPU 0x00000001
/*
* Drm mode setting
@@ -1209,7 +1296,7 @@ struct drm_mode_crtc_lut {
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
-#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
@@ -1261,6 +1348,15 @@ struct drm_mode_crtc_lut {
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+#define DRM_IOCTL_GEM_CREATE DRM_IOWR(0x09, struct drm_gem_create)
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x0a, struct drm_gem_close)
+#define DRM_IOCTL_GEM_PREAD DRM_IOW (0x0b, struct drm_gem_pread)
+#define DRM_IOCTL_GEM_PWRITE DRM_IOW (0x0c, struct drm_gem_pwrite)
+#define DRM_IOCTL_GEM_MMAP DRM_IOWR(0x0d, struct drm_gem_mmap)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0e, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0f, struct drm_gem_open)
+#define DRM_IOCTL_GEM_SET_DOMAIN DRM_IOW (0xb7, struct drm_gem_set_domain)
+
#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
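Taken together, the structs and ioctl numbers above are the whole userspace-visible surface of the new GEM API. A minimal sketch of the object lifecycle they enable, assuming a DRM device node at /dev/dri/card0 and the updated drm.h on the include path; nothing below is part of the diff itself, and error cleanup is elided:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

int gem_lifecycle_demo(void)
{
	struct drm_gem_create create;
	struct drm_gem_pwrite pwrite;
	struct drm_gem_close close_arg;
	char data[64] = "hello GEM";
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return -1;

	/* Allocate a page-aligned object; the kernel returns the handle. */
	memset(&create, 0, sizeof(create));
	create.size = 4096;
	if (ioctl(fd, DRM_IOCTL_GEM_CREATE, &create))
		return -1;

	/* Copy data into the object without mapping it. */
	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = create.handle;
	pwrite.offset = 0;
	pwrite.size = sizeof(data);
	pwrite.data_ptr = (uint64_t)(unsigned long)data;
	if (ioctl(fd, DRM_IOCTL_GEM_PWRITE, &pwrite))
		return -1;

	/* Drop the handle; the object is freed once nothing references it. */
	memset(&close_arg, 0, sizeof(close_arg));
	close_arg.handle = create.handle;
	return ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
}

Sharing follows the same pattern: DRM_IOCTL_GEM_FLINK turns a handle into a global name that another client resolves back into its own handle with DRM_IOCTL_GEM_OPEN.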
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index db857fbd..0cd920de 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -41,10 +41,14 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
+ u32 last_acthd = I915_READ(acthd_reg);
+ u32 acthd;
int i;
for (i = 0; i < 10000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
@@ -54,13 +58,41 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->head != last_head)
i = 0;
+ if (acthd != last_acthd)
+ i = 0;
+
last_head = ring->head;
- DRM_UDELAY(1);
+ last_acthd = acthd;
+ msleep_interruptible (10);
}
return -EBUSY;
}
+#if I915_RING_VALIDATE
+/**
+ * Validate the cached ring tail value
+ *
+ * If the X server writes to the ring and DRM doesn't
+ * reload the head and tail pointers, it will end up writing
+ * data to the wrong place in the ring, causing havoc.
+ */
+void i915_ring_validate(struct drm_device *dev, const char *func, int line)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+	u32 tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	u32 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+
+ if (tail != ring->tail) {
+ DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
+ func, line,
+ ring->head, head, ring->tail, tail);
+ BUG_ON(1);
+ }
+}
+#endif
+
void i915_kernel_lost_context(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -107,17 +139,16 @@ int i915_dma_cleanup(struct drm_device * dev)
I915_WRITE(0x02080, 0x1ffff000);
}
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
+ if (dev_priv->hws_agpoffset) {
+ dev_priv->hws_agpoffset = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
I915_WRITE(0x02080, 0x1ffff000);
}
-
return 0;
}
-#if defined(I915_HAVE_BUFFER)
+#if defined(I915_HAVE_BUFFER) && defined(DRI2)
#define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
#define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
#define DRI2_SAREA_BLOCK_NEXT(p) \
@@ -195,27 +226,22 @@ static int i915_initialize(struct drm_device * dev,
}
}
-
#ifdef I915_HAVE_BUFFER
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
}
#endif
- if (!dev_priv->ring.Size) {
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
+ if (init->ring_size != 0) {
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
dev_priv->ring.map.offset = init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
-
drm_core_ioremap(&dev_priv->ring.map, dev);
-
+
if (dev_priv->ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
@@ -225,7 +251,6 @@ static int i915_initialize(struct drm_device * dev,
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
}
-
dev_priv->cpp = init->cpp;
master_priv->sarea_priv->pf_current_page = 0;
@@ -251,10 +276,10 @@ static int i915_initialize(struct drm_device * dev,
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
I915_WRITE(0x02080, dev_priv->dma_status_page);
}
@@ -264,8 +289,7 @@ static int i915_initialize(struct drm_device * dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_init(&dev_priv->cmdbuf_mutex);
}
-#endif
-#if defined(I915_HAVE_BUFFER)
+#ifdef DRI2
if (init->func == I915_INIT_DMA2) {
int ret = setup_dri2_sarea(dev, file_priv, init);
if (ret) {
@@ -274,7 +298,8 @@ static int i915_initialize(struct drm_device * dev,
return ret;
}
}
-#endif
+#endif /* DRI2 */
+#endif /* I915_HAVE_BUFFER */
return 0;
}
@@ -288,11 +313,6 @@ static int i915_dma_resume(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
- if (!dev_priv->mmio_map) {
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
-
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -300,16 +320,16 @@ static int i915_dma_resume(struct drm_device * dev)
}
/* Program Hardware Status Page */
- if (!dev_priv->hw_status_page) {
+ if (!dev_priv->hws_vaddr) {
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+ DRM_DEBUG("hw status page @ %p\n", dev_priv->hws_vaddr);
- if (dev_priv->status_gfx_addr != 0)
- I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+ if (dev_priv->hws_agpoffset != 0)
+ I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
else
- I915_WRITE(0x02080, dev_priv->dma_status_page);
+ I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
return 0;
@@ -456,9 +476,9 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
return 0;
}
-static int i915_emit_box(struct drm_device * dev,
- struct drm_clip_rect __user * boxes,
- int i, int DR1, int DR4)
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_clip_rect box;
@@ -514,7 +534,7 @@ void i915_emit_breadcrumb(struct drm_device *dev)
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(20);
+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
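(With MI_STORE_DWORD_INDEX_SHIFT defined as 2 further down this series, 5 << MI_STORE_DWORD_INDEX_SHIFT == 20, so the breadcrumb still lands at byte offset 20 -- status-page dword 5, the slot READ_BREADCRUMB reads. The change only replaces the magic constant with its symbolic form.)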
@@ -713,9 +733,19 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
int i915_quiescent(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
i915_kernel_lost_context(dev);
- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ if (ret)
+ {
+ i915_kernel_lost_context (dev);
+ DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
+ dev_priv->ring.head,
+ dev_priv->ring.tail,
+ dev_priv->ring.space);
+ }
+ return ret;
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -1003,7 +1033,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
- dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+ dev_priv->hws_agpoffset = hws->addr & (0x1ffff<<12);
dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
@@ -1014,18 +1044,18 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_core_ioremap(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
i915_dma_cleanup(dev);
- dev_priv->status_gfx_addr = 0;
+ dev_priv->hws_agpoffset = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
+ dev_priv->hws_vaddr = dev_priv->hws_map.handle;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
- dev_priv->status_gfx_addr);
- DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
+ dev_priv->hws_agpoffset);
+ DRM_DEBUG("load hws at %p\n", dev_priv->hws_vaddr);
return 0;
}
@@ -1051,6 +1081,14 @@ struct drm_ioctl_desc i915_ioctls[] = {
#ifdef I915_HAVE_BUFFER
DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index bdcac9aa..445bb279 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -176,6 +176,14 @@ typedef struct drm_i915_sarea {
#define DRM_I915_MMIO 0x10
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_EXECBUFFER 0x12
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_ENTERVT 0x19
+#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -195,6 +203,14 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
/* Asynchronous page flipping:
*/
@@ -399,4 +415,146 @@ struct drm_i915_execbuffer {
struct drm_fence_arg fence_arg;
};
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_end;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+ * It's appealing to make this be an index into the mm_validate_entry
+ * list to refer to the buffer, but this allows the driver to create
+ * a relocation list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ uint32_t delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+};
+
+/**
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+
+/* 0x00000001 is DRM_GEM_DOMAIN_CPU */
+#define DRM_GEM_DOMAIN_I915_RENDER 0x00000002 /* Render cache, used by 2D and 3D drawing */
+#define DRM_GEM_DOMAIN_I915_SAMPLER 0x00000004 /* Sampler cache, used by texture engine */
+#define DRM_GEM_DOMAIN_I915_COMMAND 0x00000008 /* Command queue, used to load batch buffers */
+#define DRM_GEM_DOMAIN_I915_INSTRUCTION 0x00000010 /* Instruction cache, used by shader programs */
+#define DRM_GEM_DOMAIN_I915_VERTEX 0x00000020 /* Vertex address cache */
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ uint32_t handle;
+
+ /** List of relocations to be performed on this buffer */
+ uint32_t relocation_count;
+ uint64_t relocs_ptr; /* struct drm_i915_gem_relocation_entry *relocs */
+
+ /** Required alignment in graphics aperture */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+ * performed on them.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ uint64_t buffers_ptr; /* struct drm_i915_gem_validate_entry *buffers */
+ uint32_t buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ uint32_t batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+};
+
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ uint32_t handle;
+ uint32_t pad;
+
+ /** alignment required within the aperture */
+ uint64_t alignment;
+
+ /** Returned GTT offset of the buffer. */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ uint32_t handle;
+
+ /** Return busy status (1 if busy, 0 if idle) */
+ uint32_t busy;
+};
+
#endif /* _I915_DRM_H_ */
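The execbuffer path ties the pieces above together: userspace lists every buffer the batch references, attaches relocations to the batch, and lets the kernel bind buffers and patch offsets. A hypothetical sketch, assuming the caller already owns GEM handles for a target buffer and a batchbuffer (the helper name, handles, and offsets are placeholders, not code from this patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

int submit_batch_sketch(int fd, uint32_t target_handle,
			uint32_t batch_handle, uint32_t batch_len)
{
	/* One relocation: dword 10 of the batch points into the target. */
	struct drm_i915_gem_relocation_entry reloc = {
		.target_handle   = target_handle,
		.delta           = 0,
		.offset          = 10 * 4,	/* byte offset in the batch */
		.presumed_offset = 0,		/* written back by the kernel */
		.read_domains    = DRM_GEM_DOMAIN_I915_RENDER,
		.write_domain    = 0,
	};
	/* Targets must precede the buffers that relocate against them. */
	struct drm_i915_gem_exec_object exec[2] = {
		{ .handle = target_handle },
		{ .handle = batch_handle,
		  .relocation_count = 1,
		  .relocs_ptr = (uint64_t)(unsigned long)&reloc },
	};
	struct drm_i915_gem_execbuffer execbuf = {
		.buffers_ptr        = (uint64_t)(unsigned long)exec,
		.buffer_count       = 2,
		.batch_start_offset = 0,
		.batch_len          = batch_len,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
}

On return, exec[i].offset holds each buffer's new GTT offset; feeding it back as presumed_offset on the next submission lets the kernel skip rewriting relocations for buffers that did not move.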
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 6d72c051..06aa00ab 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -81,14 +81,13 @@ struct drm_i915_validate_buffer;
struct drm_i915_ring_buffer {
int tail_mask;
- unsigned long Start;
- unsigned long End;
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
drm_local_map_t map;
+ struct drm_gem_object *ring_obj;
};
struct mem_block {
@@ -114,7 +113,7 @@ struct drm_i915_master_private {
};
struct drm_i915_private {
- struct drm_buffer_object *ring_buffer;
+ struct drm_device *dev;
drm_local_map_t *mmio_map;
@@ -124,12 +123,12 @@ struct drm_i915_private {
struct drm_i915_ring_buffer ring;
struct drm_dma_handle *status_page_dmah;
- void *hw_status_page;
dma_addr_t dma_status_page;
uint32_t counter;
- unsigned int status_gfx_addr;
+ uint32_t hws_agpoffset;
drm_local_map_t hws_map;
- struct drm_buffer_object *hws_bo;
+ void *hws_vaddr;
+ struct drm_memrange_node *hws;
unsigned int cpp;
@@ -145,12 +144,14 @@ struct drm_i915_private {
DRM_SPINTYPE user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
- uint32_t irq_enable_reg;
+ uint32_t irq_mask_reg;
int irq_enabled;
struct workqueue_struct *wq;
bool cursor_needs_physical;
+ struct drm_memrange vram;
+
#ifdef I915_HAVE_FENCE
uint32_t flush_sequence;
uint32_t flush_flags;
@@ -175,17 +176,78 @@ struct drm_i915_private {
struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *vbt_mode; /* if any */
-#if defined(I915_HAVE_BUFFER)
+#if defined(I915_HAVE_BUFFER) && defined(DRI2)
/* DRI2 sarea */
- struct drm_buffer_object *sarea_bo;
- struct drm_bo_kmap_obj sarea_kmap;
+ struct drm_gem_object *sarea_object;
+ struct drm_bo_kmap_obj sarea_kmap;
+#endif
/* Feature bits from the VBIOS */
int int_tv_support:1;
int lvds_dither:1;
int lvds_vbt:1;
int int_crt_support:1;
-#endif
+
+ struct {
+ struct drm_memrange gtt_space;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct timer_list retire_timer;
+ struct work_struct retire_task;
+
+ uint32_t next_gem_seqno;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+ } mm;
+
+ struct work_struct user_interrupt_task;
/* Register state */
u8 saveLBB;
@@ -284,6 +346,68 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+ struct drm_gem_object *obj;
+
+ /** Current space allocated to this object in the GTT, if any. */
+ struct drm_memrange_node *gtt_space;
+
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head list;
+
+ /**
+ * This is set if the object is on the active or flushing lists
+ * (has pending rendering), and is not set if it's on inactive (ready
+ * to be unbound).
+ */
+ int active;
+
+ /** AGP memory structure for our GTT binding. */
+ DRM_AGP_MEM *agp_mem;
+
+ struct page **page_list;
+
+ /**
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
+ */
+ uint32_t gtt_offset;
+
+ /** Boolean whether this object has a valid gtt offset. */
+ int gtt_bound;
+
+ /** How many users have pinned this object in GTT space */
+ int pin_count;
+
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+ /** GEM sequence number associated with this request. */
+ uint32_t seqno;
+
+ /** Time at which this request was emitted, in jiffies. */
+ unsigned long emitted_jiffies;
+
+ /** Cache domains that were flushed at the start of the request. */
+ uint32_t flush_domains;
+
+ struct list_head list;
+};
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
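Between them, the object and request structures define the buffer lifecycle: a request's seqno is written to the status page when its commands complete, and retiring walks request_list, moving finished objects off the active list. The retire logic itself lives in i915_gem.c, which is not part of this diff; the following is an inferred sketch of its likely shape, built only from the structures and declarations above (the function name and the i915_seqno_passed helper are hypothetical):

static int i915_seqno_passed(uint32_t seq, uint32_t hw_seq)
{
	return (int32_t)(hw_seq - seq) >= 0;	/* wrap-safe comparison */
}

void i915_gem_retire_requests_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t hw_seqno = READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request =
			list_entry(dev_priv->mm.request_list.next,
				   struct drm_i915_gem_request, list);

		if (!i915_seqno_passed(request->seqno, hw_seqno))
			break;

		/* Buffers whose last_rendering_seqno has now passed move
		 * off the active list (to flushing or inactive, depending
		 * on their write_domain), then the request is freed. */
		list_del(&request->list);
		drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
	}
}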
@@ -309,6 +433,10 @@ extern int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch);
extern int i915_quiescent(struct drm_device *dev);
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4);
+
/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -325,6 +453,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_emit_irq(struct drm_device * dev);
extern void i915_enable_interrupt (struct drm_device *dev);
+extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
@@ -332,6 +461,7 @@ extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_user_irq_on(struct drm_device *dev);
extern void i915_user_irq_off(struct drm_device *dev);
+extern void i915_user_interrupt_handler(struct work_struct *work);
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -353,7 +483,7 @@ extern void i915_invalidate_reported_sequence(struct drm_device *dev);
#endif
-#ifdef I915_HAVE_BUFFER
+#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
/* i915_buffer.c */
extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass,
@@ -365,10 +495,46 @@ extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
extern int i915_move(struct drm_buffer_object *bo, int evict,
int no_wait, struct drm_bo_mem_reg *new_mem);
void i915_flush_ttm(struct drm_ttm *ttm);
+#endif /* ttm */
+#ifdef I915_HAVE_BUFFER
/* i915_execbuf.c */
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+void i915_gem_object_unpin(struct drm_gem_object *obj);
+int i915_gem_set_domain(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int i915_gem_flush_pwrite(struct drm_gem_object *obj,
+ uint64_t offset, uint64_t size);
+void i915_gem_lastclose(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_timeout(unsigned long data);
+void i915_gem_retire_handler(struct work_struct *work);
+int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end);
#endif
extern unsigned int i915_fbpercrtc;
@@ -392,16 +558,25 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_VERBOSE 0
+#define I915_RING_VALIDATE 0
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
+#if I915_RING_VALIDATE
+void i915_ring_validate(struct drm_device *dev, const char *func, int line);
+#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
+#else
+#define I915_RING_DO_VALIDATE(dev)
+#endif
+
#define BEGIN_LP_RING(n) do { \
if (I915_VERBOSE) \
DRM_DEBUG("BEGIN_LP_RING(%d)\n", \
(n)); \
+ I915_RING_DO_VALIDATE(dev); \
if (dev_priv->ring.space < (n)*4) \
i915_wait_ring(dev, (n)*4, __FUNCTION__); \
outcount = 0; \
@@ -420,17 +595,12 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
#define ADVANCE_LP_RING() do { \
if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
+ I915_RING_DO_VALIDATE(dev); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I915_WRITE(PRB0_TAIL, outring); \
} while(0)
-#define BREADCRUMB_BITS 31
-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
-
-#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
-
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/*
@@ -532,17 +702,40 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) /* used to have 1<<22? */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hws_vaddr))[5])
+
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 4: ring 0 head pointer
+ * 5: ring 1 head pointer (915-class)
+ * 6: ring 2 head pointer (915-class)
+ *
+ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hws_vaddr))[reg])
+#define I915_GEM_HWS_INDEX 0x10
+
/*
* 3D instructions used by the kernel
*/
#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
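With this layout, GEM can keep its own breadcrumb out of the legacy dword-5 slot. A sketch of the emit side, mirroring i915_emit_breadcrumb in i915_dma.c but storing to the driver-reserved area (the function is illustrative, not from the patch; seqno is a placeholder):

static void i915_emit_gem_seqno_sketch(struct drm_device *dev, uint32_t seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	/* byte offset of dword I915_GEM_HWS_INDEX in the status page */
	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(seqno);
	OUT_RING(0);
	ADVANCE_LP_RING();
}

READ_HWSP(dev_priv, I915_GEM_HWS_INDEX) then recovers the last completed seqno without touching hardware registers.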
@@ -603,6 +796,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#define I965REG_ACTHD 0x02074
#define HWS_PGA 0x02080
#define IPEIR 0x02088
#define NOPID 0x02094
@@ -632,6 +826,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define EMR 0x020b4
#define ESR 0x020b8
#define INSTPM 0x020c0
+#define I915REG_ACTHD 0x020C8
#define FW_BLC 0x020d8
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define MI_ARB_STATE 0x020e4 /* 915+ only */
@@ -790,12 +985,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define ADPA_DPMS_STANDBY (2<<10)
#define ADPA_DPMS_OFF (3<<10)
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-/* The binner has its own ring buffer:
- */
-#define HWB_RING 0x2400
-
#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04
diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c
index f2c07fc6..3f310770 100644
--- a/shared-core/i915_init.c
+++ b/shared-core/i915_init.c
@@ -100,62 +100,11 @@ int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
return 0;
}
-int i915_load_modeset_init(struct drm_device *dev)
+static int i915_init_hwstatus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long agp_size, prealloc_size;
- int size, ret = 0;
-
- i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
- printk("setting up %ld bytes of VRAM space\n", prealloc_size);
- printk("setting up %ld bytes of TT space\n", (agp_size - prealloc_size));
-
- drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, prealloc_size >> PAGE_SHIFT, 1);
- drm_bo_init_mm(dev, DRM_BO_MEM_TT, prealloc_size >> PAGE_SHIFT,
- (agp_size - prealloc_size) >> PAGE_SHIFT, 1);
- I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_HEAD, 0);
- I915_WRITE(PRB0_TAIL, 0);
-
- size = PRIMARY_RINGBUFFER_SIZE;
- ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
- DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_MEM_VRAM |
- DRM_BO_FLAG_NO_EVICT,
- DRM_BO_HINT_DONT_FENCE, 0x1, 0,
- &dev_priv->ring_buffer);
- if (ret < 0) {
- DRM_ERROR("Unable to allocate or pin ring buffer\n");
- goto clean_mm;
- }
-
- /* remap the buffer object properly */
- dev_priv->ring.Start = dev_priv->ring_buffer->offset;
- dev_priv->ring.End = dev_priv->ring.Start + size;
- dev_priv->ring.Size = size;
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- /* FIXME: need wrapper with PCI mem checks */
- ret = drm_mem_reg_ioremap(dev, &dev_priv->ring_buffer->mem,
- (void **) &dev_priv->ring.virtual_start);
- if (ret) {
- DRM_ERROR("error mapping ring buffer: %d\n", ret);
- goto destroy_ringbuffer;
- }
-
- DRM_DEBUG("ring start %08lX, %p, %08lX\n", dev_priv->ring.Start,
- dev_priv->ring.virtual_start, dev_priv->ring.Size);
-
- memset((void *)(dev_priv->ring.virtual_start), 0, dev_priv->ring.Size);
- I915_WRITE(PRB0_START, dev_priv->ring.Start);
- I915_WRITE(PRB0_CTL, ((dev_priv->ring.Size - 4096) & RING_NR_PAGES) |
- (RING_NO_REPORT | RING_VALID));
-
- /* Allow hardware batchbuffers unless told otherwise.
- */
- dev_priv->allow_batchbuffer = 1;
- dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
- mutex_init(&dev_priv->cmdbuf_mutex);
+ struct drm_memrange_node *free_space;
+ int ret = 0;
/* Program Hardware Status Page */
if (!IS_G33(dev)) {
@@ -165,52 +114,105 @@ int i915_load_modeset_init(struct drm_device *dev)
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
ret = -ENOMEM;
- goto destroy_ringbuffer;
+ goto out;
}
- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
} else {
- size = 4 * 1024;
- ret = drm_buffer_object_create(dev, size,
- drm_bo_type_kernel,
- DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_MEM_VRAM |
- DRM_BO_FLAG_NO_EVICT,
- DRM_BO_HINT_DONT_FENCE, 0x1, 0,
- &dev_priv->hws_bo);
- if (ret < 0) {
+ free_space = drm_memrange_search_free(&dev_priv->vram,
+ PAGE_SIZE,
+ PAGE_SIZE, 0);
+ if (!free_space) {
+ DRM_ERROR("No free vram available, aborting\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_priv->hws = drm_memrange_get_block(free_space, PAGE_SIZE,
+ PAGE_SIZE);
+ if (!dev_priv->hws) {
DRM_ERROR("Unable to allocate or pin hw status page\n");
ret = -EINVAL;
- goto destroy_ringbuffer;
+ goto out;
}
- dev_priv->status_gfx_addr =
- dev_priv->hws_bo->offset & (0x1ffff << 12);
+ dev_priv->hws_agpoffset = dev_priv->hws->start;
dev_priv->hws_map.offset = dev->agp->base +
- dev_priv->hws_bo->offset;
- dev_priv->hws_map.size = size;
+ dev_priv->hws->start;
+ dev_priv->hws_map.size = PAGE_SIZE;
dev_priv->hws_map.type= 0;
dev_priv->hws_map.flags= 0;
dev_priv->hws_map.mtrr = 0;
drm_core_ioremap(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
- dev_priv->status_gfx_addr = 0;
+ dev_priv->hws_agpoffset = 0;
DRM_ERROR("can not ioremap virtual addr for"
"G33 hw status page\n");
ret = -ENOMEM;
- goto destroy_hws;
+ goto out_free;
}
- dev_priv->hw_status_page = dev_priv->hws_map.handle;
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ dev_priv->hws_vaddr = dev_priv->hws_map.handle;
+ I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
}
+
+ memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
+
DRM_DEBUG("Enabled hardware status page\n");
+ return 0;
+
+out_free:
+ /* free hws */
+out:
+ return ret;
+}
+
+static void i915_cleanup_hwstatus(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_G33(dev)) {
+ if (dev_priv->status_page_dmah)
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ } else {
+ if (dev_priv->hws_map.handle)
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ if (dev_priv->hws)
+ drm_memrange_put_block(dev_priv->hws);
+ }
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long agp_size, prealloc_size;
+ int ret = 0;
+
+ i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
+
+ /* Basic memrange allocator for stolen space (aka vram) */
+ drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
+ /* Let GEM Manage from end of prealloc space to end of aperture */
+ i915_gem_do_init(dev, prealloc_size, agp_size);
+
+ ret = i915_gem_init_ringbuffer(dev);
+ if (ret)
+ goto out;
+
+ ret = i915_init_hwstatus(dev);
+ if (ret)
+ goto destroy_ringbuffer;
+
+ /* Allow hardware batchbuffers unless told otherwise.
+ */
+ dev_priv->allow_batchbuffer = 1;
+ dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
+ mutex_init(&dev_priv->cmdbuf_mutex);
+
dev_priv->wq = create_singlethread_workqueue("i915");
if (dev_priv->wq == 0) {
DRM_DEBUG("Error\n");
@@ -228,9 +230,6 @@ int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_init(dev);
drm_helper_initial_config(dev, false);
- drm_mm_print(&dev->bm.man[DRM_BO_MEM_VRAM].manager, "VRAM");
- drm_mm_print(&dev->bm.man[DRM_BO_MEM_TT].manager, "TT");
-
dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
@@ -249,25 +248,10 @@ modeset_cleanup:
destroy_wq:
destroy_workqueue(dev_priv->wq);
destroy_hws:
- if (!IS_G33(dev)) {
- if (dev_priv->status_page_dmah)
- drm_pci_free(dev, dev_priv->status_page_dmah);
- } else {
- if (dev_priv->hws_map.handle)
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- if (dev_priv->hws_bo)
- drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
- }
- I915_WRITE(HWS_PGA, 0x1ffff000);
+ i915_cleanup_hwstatus(dev);
destroy_ringbuffer:
- if (dev_priv->ring.virtual_start)
- drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
- dev_priv->ring.virtual_start);
- if (dev_priv->ring_buffer)
- drm_bo_usage_deref_unlocked(&dev_priv->ring_buffer);
-clean_mm:
- drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1);
- drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1);
+ i915_gem_cleanup_ringbuffer(dev);
+out:
return ret;
}
@@ -293,7 +277,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
memset(dev_priv, 0, sizeof(struct drm_i915_private));
dev->dev_private = (void *)dev_priv;
-// dev_priv->flags = flags;
+ dev_priv->dev = dev;
/* i915 has 4 more counters */
dev->counters += 4;
@@ -341,6 +325,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ dev_priv->mm.retire_timer.function = i915_gem_retire_timeout;
+ dev_priv->mm.retire_timer.data = (unsigned long) dev;
+ init_timer_deferrable (&dev_priv->mm.retire_timer);
+ INIT_WORK(&dev_priv->mm.retire_task,
+ i915_gem_retire_handler);
+ INIT_WORK(&dev_priv->user_interrupt_task,
+ i915_user_interrupt_handler);
+ dev_priv->mm.next_gem_seqno = 1;
+
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_init_chipset_flush_compat(dev);
@@ -348,26 +345,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
#endif
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- /*
- * Initialize the memory manager for local and AGP space
- */
- ret = drm_bo_driver_init(dev);
- if (ret) {
- DRM_ERROR("fail to init memory manager for "
- "local & AGP space\n");
- goto out_rmmap;
- }
-
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
- goto driver_fini;
+ goto out_rmmap;
}
}
return 0;
-driver_fini:
- drm_bo_driver_finish(dev);
out_rmmap:
drm_rmmap(dev, dev_priv->mmio_map);
free_priv:
@@ -392,6 +377,8 @@ int i915_driver_unload(struct drm_device *dev)
drm_core_ioremapfree(&dev_priv->ring.map, dev);
}
#endif
+
+#ifdef DRI2
if (dev_priv->sarea_kmap.virtual) {
drm_bo_kunmap(&dev_priv->sarea_kmap);
dev_priv->sarea_kmap.virtual = NULL;
@@ -404,44 +391,17 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
dev_priv->sarea_bo = NULL;
}
-
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- dev_priv->hw_status_page = NULL;
- dev_priv->dma_status_page = 0;
- /* Need to rewrite hardware status page */
- I915_WRITE(HWS_PGA, 0x1ffff000);
- }
-
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- drm_bo_usage_deref_unlocked(&dev_priv->hws_bo);
- I915_WRITE(HWS_PGA, 0x1ffff000);
- }
+#endif
+ i915_cleanup_hwstatus(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_mem_reg_iounmap(dev, &dev_priv->ring_buffer->mem,
- dev_priv->ring.virtual_start);
-
- DRM_DEBUG("usage is %d\n", atomic_read(&dev_priv->ring_buffer->usage));
mutex_lock(&dev->struct_mutex);
- drm_bo_usage_deref_locked(&dev_priv->ring_buffer);
-
- if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1)) {
- DRM_ERROR("Memory manager type 3 not clean. "
- "Delaying takedown\n");
- }
- if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM, 1)) {
- DRM_ERROR("Memory manager type 3 not clean. "
- "Delaying takedown\n");
- }
+ i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
+ drm_memrange_takedown(&dev_priv->vram);
+ i915_gem_lastclose(dev);
}
- drm_bo_driver_finish(dev);
-
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_fini_chipset_flush_compat(dev);
@@ -500,7 +460,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (drm_core_check_feature(dev, DRIVER_MODESET))
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
@@ -511,8 +471,33 @@ void i915_driver_lastclose(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+#ifdef I915_HAVE_BUFFER
+ if (dev_priv->val_bufs) {
+ vfree(dev_priv->val_bufs);
+ dev_priv->val_bufs = NULL;
+ }
+#endif
+
+ i915_gem_lastclose(dev);
+
if (dev_priv->agp_heap)
i915_mem_takedown(&(dev_priv->agp_heap));
+
+#if defined(DRI2)
+ if (dev_priv->sarea_kmap.virtual) {
+ drm_bo_kunmap(&dev_priv->sarea_kmap);
+ dev_priv->sarea_kmap.virtual = NULL;
+ dev->control->master->lock.hw_lock = NULL;
+ dev->sigdata.lock = NULL;
+ }
+
+ if (dev_priv->sarea_bo) {
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_usage_deref_locked(&dev_priv->sarea_bo);
+ mutex_unlock(&dev->struct_mutex);
+ dev_priv->sarea_bo = NULL;
+ }
+#endif
i915_dma_cleanup(dev);
}
@@ -521,7 +506,8 @@ int i915_driver_firstopen(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
-
+#if defined(I915_HAVE_BUFFER) && defined(I915_TTM)
drm_bo_driver_init(dev);
+#endif
return 0;
}
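The reworked i915_load_modeset_init splits the aperture once at load time: stolen memory becomes a simple drm_memrange allocator and everything above it is handed to GEM.

   0                        prealloc_size                        agp_size
   |-- stolen "vram" (dev_priv->vram memrange) --|-- GEM-managed GTT --|

i915_init_hwstatus then takes the status page from the vram range on G33-class chips (elsewhere it is a PCI DMA allocation), and i915_gem_init_ringbuffer allocates the ring as a GEM object.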
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 2d355688..bd11d37a 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -443,9 +443,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
static struct drm_device *hotplug_dev;
-/*
- * This code is called in a more safe envirmoent to handle the hotplugs.
- * Add code here for hotplug love to userspace.
+/**
+ * Handler for user interrupts in process context (able to sleep, do VFS
+ * operations, etc.)
+ *
+ * If another IRQ comes in while we're in this handler, it will still get put
+ * on the queue again to be rerun when we finish.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void i915_hotplug_work_func(void *work)
@@ -485,12 +488,26 @@ static int i915_run_hotplug_tasklet(struct drm_device *dev, uint32_t stat)
if (stat & SDVOC_HOTPLUG_INT_STATUS) {
DRM_DEBUG("sDVOC event\n");
}
-
queue_work(dev_priv->wq, &hotplug);
return 0;
}
+void
+i915_user_interrupt_handler(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv;
+ struct drm_device *dev;
+
+ dev_priv = container_of(work, struct drm_i915_private,
+ user_interrupt_task);
+ dev = dev_priv->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_retire_requests(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -507,7 +524,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
else
iir = I915_READ16(IIR);
- iir &= (dev_priv->irq_enable_reg | I915_USER_INTERRUPT);
+ iir &= (dev_priv->irq_mask_reg | I915_USER_INTERRUPT);
#if 0
DRM_DEBUG("flag=%08x\n", iir);
@@ -581,6 +598,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
i915_fence_handler(dev);
+ schedule_work(&dev_priv->user_interrupt_task);
#endif
}
@@ -636,11 +654,12 @@ void i915_user_irq_on(struct drm_device *dev)
DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
+ dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ I915_READ16(IMR);
}
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
@@ -651,23 +670,30 @@ void i915_user_irq_off(struct drm_device *dev)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- // dev_priv->irq_enable_reg &= ~I915_USER_INTERRUPT;
- // if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- // I915_WRITE(IER, dev_priv->irq_enable_reg);
- // else
- // I915_WRITE16(IER, dev_priv->irq_enable_reg);
+ dev_priv->irq_mask_reg |= I915_USER_INTERRUPT;
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ else
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ I915_READ16(IMR);
}
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_i915_master_private *master_priv;
int ret = 0;
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -739,16 +765,17 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
@@ -775,11 +802,13 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
I915_WRITE(pipestat_reg, pipestat);
}
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ dev_priv->irq_mask_reg &= ~mask_reg;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
-
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
return 0;
}
@@ -789,16 +818,17 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
@@ -806,13 +836,15 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
break;
}
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ dev_priv->irq_mask_reg |= mask_reg;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
- if (pipestat_reg)
- {
+ if (pipestat_reg) {
pipestat = I915_READ (pipestat_reg);
pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -822,6 +854,7 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
+ (void) I915_READ(pipestat_reg);
}
}
@@ -830,14 +863,14 @@ void i915_enable_interrupt (struct drm_device *dev)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_connector *o;
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
+ dev_priv->irq_mask_reg &= ~I915_USER_INTERRUPT;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
if (dev->mode_config.num_connector)
- dev_priv->irq_enable_reg |= I915_DISPLAY_PORT_INTERRUPT;
+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
} else {
if (dev->mode_config.num_connector)
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
/* Enable global interrupts for hotplug - not a pipeA event */
I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) |
@@ -847,7 +880,8 @@ void i915_enable_interrupt (struct drm_device *dev)
PIPE_HOTPLUG_INTERRUPT_STATUS);
}
- if (dev_priv->irq_enable_reg & (I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
+ if (!(dev_priv->irq_mask_reg & I915_DISPLAY_PORT_INTERRUPT) ||
+ !(dev_priv->irq_mask_reg & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
u32 temp = 0;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
@@ -891,10 +925,13 @@ void i915_enable_interrupt (struct drm_device *dev)
}
}
- if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- I915_WRITE(IER, dev_priv->irq_enable_reg);
- else
- I915_WRITE16(IER, dev_priv->irq_enable_reg);
+ if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IER, ~dev_priv->irq_mask_reg);
+ } else {
+ I915_WRITE16(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE16(IER, ~(u16)dev_priv->irq_mask_reg);
+ }
dev_priv->irq_enabled = 1;
}
@@ -1134,7 +1171,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
tmp = I915_READ16(IIR);
I915_WRITE16(IIR, tmp);
}
-
}
int i915_driver_irq_postinstall(struct drm_device * dev)
@@ -1148,7 +1184,7 @@ int i915_driver_irq_postinstall(struct drm_device * dev)
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
- dev_priv->irq_enable_reg = 0;
+ dev_priv->irq_mask_reg = ~0;
ret = drm_vblank_init(dev, num_pipes);
if (ret)
@@ -1179,7 +1215,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- dev_priv->irq_enabled = 0;
+ dev_priv->irq_enabled = 1;
temp = I915_READ(PIPEASTAT);
I915_WRITE(PIPEASTAT, temp);
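The recurring pattern in this file is the switch from IER bookkeeping to IMR bookkeeping: an interrupt is now enabled by clearing its bit in the cached mask rather than setting it in the enable register, IER is programmed as the complement of the mask, and a read-back forces the write to post. Distilled into one illustrative helper (not code from the patch):

static void i915_unmask_irq_sketch(struct drm_i915_private *dev_priv, u32 bit)
{
	dev_priv->irq_mask_reg &= ~bit;		/* clear mask bit = enable */
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IMR);			/* posting read */
}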
diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c
index 2e680306..819a61ae 100644
--- a/shared-core/radeon_cp.c
+++ b/shared-core/radeon_cp.c
@@ -112,6 +112,27 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
}
+static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
+{
+ u32 agp_base_hi = upper_32_bits(agp_base);
+ u32 agp_base_lo = agp_base & 0xffffffff;
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
+ R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
+ R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
+ RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
+ RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
+ } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
+ R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
+ R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
+ } else {
+		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
+ RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
+ }
+}
+
static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -542,9 +563,8 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
- RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
- if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
- RADEON_WRITE(RADEON_AGP_BASE_2, 0);
+ radeon_write_agp_base(dev_priv, dev->agp->base);
+
radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h
index 1b32b2f4..e263c610 100644
--- a/shared-core/radeon_drv.h
+++ b/shared-core/radeon_drv.h
@@ -524,9 +524,13 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,
#define RV515_MC_FB_LOCATION 0x01
#define RV515_MC_AGP_LOCATION 0x02
+#define RV515_MC_AGP_BASE 0x03
+#define RV515_MC_AGP_BASE_2 0x04
#define R520_MC_FB_LOCATION 0x04
#define R520_MC_AGP_LOCATION 0x05
+#define R520_MC_AGP_BASE 0x06
+#define R520_MC_AGP_BASE_2 0x07
#define RADEON_MPP_TB_CONFIG 0x01c0
#define RADEON_MEM_CNTL 0x0140