Diffstat (limited to 'shared-core')
-rw-r--r--  shared-core/i915_dma.c  14
-rw-r--r--  shared-core/i915_drv.h  18
-rw-r--r--  shared-core/i915_irq.c  70
3 files changed, 53 insertions(+), 49 deletions(-)
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index c3a41bd5..5fb9fcff 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -855,12 +855,14 @@ static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
static int i915_mmio(DRM_IOCTL_ARGS)
{
- char buf[32];
+ uint32_t buf[8];
DRM_DEVICE;
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_mmio_entry_t *e;
drm_i915_mmio_t mmio;
void __iomem *base;
+ int i;
+
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
@@ -878,7 +880,8 @@ static int i915_mmio(DRM_IOCTL_ARGS)
case I915_MMIO_READ:
if (!(e->flag & I915_MMIO_MAY_READ))
return DRM_ERR(EINVAL);
- memcpy_fromio(buf, base, e->size);
+ for (i = 0; i < e->size / 4; i++)
+ buf[i] = I915_READ(e->offset + i * 4);
if (DRM_COPY_TO_USER(mmio.data, buf, e->size)) {
DRM_ERROR("DRM_COPY_TO_USER failed\n");
return DRM_ERR(EFAULT);
@@ -892,7 +895,8 @@ static int i915_mmio(DRM_IOCTL_ARGS)
DRM_ERROR("DRM_COPY_TO_USER failed\n");
return DRM_ERR(EFAULT);
}
- memcpy_toio(base, buf, e->size);
+ for (i = 0; i < e->size / 4; i++)
+ I915_WRITE(e->offset + i * 4, buf[i]);
break;
}
return 0;
@@ -910,11 +914,11 @@ static int i915_set_status_page(DRM_IOCTL_ARGS)
}
DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data,
sizeof(hws));
- printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr);
+ DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws.addr);
dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);
- dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr;
+ dev_priv->hws_map.offset = dev->agp->base + hws.addr;
dev_priv->hws_map.size = 4*1024;
dev_priv->hws_map.type = 0;
dev_priv->hws_map.flags = 0;
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index ee2b474b..1a2220a5 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -39,6 +39,11 @@
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20070209"
+#if defined(__linux__)
+#define I915_HAVE_FENCE
+#define I915_HAVE_BUFFER
+#endif
+
/* Interface history:
*
* 1.1: Original.
@@ -52,13 +57,12 @@
* 1.9: Usable page flipping and triple buffering
*/
#define DRIVER_MAJOR 1
+#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
#define DRIVER_MINOR 9
-#define DRIVER_PATCHLEVEL 0
-
-#if defined(__linux__)
-#define I915_HAVE_FENCE
-#define I915_HAVE_BUFFER
+#else
+#define DRIVER_MINOR 6
#endif
+#define DRIVER_PATCHLEVEL 0
typedef struct _drm_i915_ring_buffer {
int tail_mask;
@@ -114,7 +118,7 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
- spinlock_t user_irq_lock;
+ DRM_SPINTYPE user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
uint32_t irq_enable_reg;
@@ -129,7 +133,7 @@ typedef struct drm_i915_private {
#ifdef I915_HAVE_BUFFER
void *agp_iomap;
#endif
- spinlock_t swaps_lock;
+ DRM_SPINTYPE swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
unsigned int swaps_pending;
} drm_i915_private_t;
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 17cccac3..698ecced 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -51,6 +51,8 @@ i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
u16 x1, y1, x2, y2;
int pf_pipes = 1 << pipe;
+ DRM_SPINLOCK_ASSERT(&dev->drw_lock);
+
/* If the window is visible on the other pipe, we have to flip on that
* pipe as well.
*/
@@ -90,7 +92,6 @@ i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
static void i915_vblank_tasklet(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
struct list_head *list, *tmp, hits, *hit;
int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
unsigned counter[2] = { atomic_read(&dev->vbl_received),
@@ -112,7 +113,12 @@ static void i915_vblank_tasklet(struct drm_device *dev)
nhits = nrects = 0;
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+ /* No irqsave/restore necessary. This tasklet may be run in an
+ * interrupt context or normal context, but we don't have to worry
+ * about getting interrupted by something acquiring the lock, because
+ * we are the interrupt context thing that acquires the lock.
+ */
+ DRM_SPINLOCK(&dev_priv->swaps_lock);
/* Find buffer swaps scheduled for this vertical blank */
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
@@ -125,15 +131,15 @@ static void i915_vblank_tasklet(struct drm_device *dev)
list_del(list);
dev_priv->swaps_pending--;
- spin_unlock(&dev_priv->swaps_lock);
- spin_lock(&dev->drw_lock);
+ DRM_SPINUNLOCK(&dev_priv->swaps_lock);
+ DRM_SPINLOCK(&dev->drw_lock);
drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
if (!drw) {
- spin_unlock(&dev->drw_lock);
+ DRM_SPINUNLOCK(&dev->drw_lock);
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
- spin_lock(&dev_priv->swaps_lock);
+ DRM_SPINLOCK(&dev_priv->swaps_lock);
continue;
}
@@ -150,7 +156,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}
}
- spin_unlock(&dev->drw_lock);
+ DRM_SPINUNLOCK(&dev->drw_lock);
/* List of hits was empty, or we reached the end of it */
if (hit == &hits)
@@ -158,16 +164,15 @@ static void i915_vblank_tasklet(struct drm_device *dev)
nhits++;
- spin_lock(&dev_priv->swaps_lock);
+ DRM_SPINLOCK(&dev_priv->swaps_lock);
}
+ DRM_SPINUNLOCK(&dev->drw_lock);
+
if (nhits == 0) {
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
return;
}
- spin_unlock(&dev_priv->swaps_lock);
-
i915_kernel_lost_context(dev);
upper[0] = upper[1] = 0;
@@ -181,7 +186,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
offsets[2] = sarea_priv->third_offset;
num_pages = sarea_priv->third_handle ? 3 : 2;
- spin_lock(&dev->drw_lock);
+ DRM_SPINLOCK(&dev->drw_lock);
/* Emit blits for buffer swaps, partitioning both outputs into as many
* slices as there are buffer swaps scheduled in order to avoid tearing
@@ -263,7 +268,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
}
}
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ DRM_SPINUNLOCK(&dev->drw_lock);
list_for_each_safe(hit, tmp, &hits) {
drm_i915_vbl_swap_t *swap_hit =
@@ -363,23 +368,23 @@ int i915_emit_irq(struct drm_device * dev)
void i915_user_irq_on(drm_i915_private_t *dev_priv)
{
- spin_lock(&dev_priv->user_irq_lock);
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
dev_priv->irq_enable_reg |= USER_INT_FLAG;
I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
}
- spin_unlock(&dev_priv->user_irq_lock);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
void i915_user_irq_off(drm_i915_private_t *dev_priv)
{
- spin_lock(&dev_priv->user_irq_lock);
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
// dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
// I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
}
- spin_unlock(&dev_priv->user_irq_lock);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
@@ -598,16 +603,6 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
return DRM_ERR(EINVAL);
}
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- if (!drm_get_drawable_info(dev, swap.drawable)) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable);
- return DRM_ERR(EINVAL);
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
if (seqtype == _DRM_VBLANK_RELATIVE)
@@ -630,12 +625,13 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
LOCK_TEST_WITH_RETURN(dev, filp);
- spin_lock_irqsave(&dev->drw_lock, irqflags);
+ DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
drw = drm_get_drawable_info(dev, swap.drawable);
if (!drw) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock,
+ irqflags);
DRM_DEBUG("Invalid drawable ID %d\n",
swap.drawable);
return DRM_ERR(EINVAL);
@@ -643,13 +639,13 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
i915_dispatch_vsync_flip(dev, drw, pipe);
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
return 0;
}
}
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+ DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
list_for_each(list, &dev_priv->vbl_swaps.head) {
vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
@@ -658,13 +654,13 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
vbl_swap->pipe == pipe &&
vbl_swap->sequence == swap.sequence) {
vbl_swap->flip = (swap.seqtype & _DRM_VBLANK_FLIP);
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
DRM_DEBUG("Already scheduled\n");
return 0;
}
}
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
if (dev_priv->swaps_pending >= 100) {
DRM_DEBUG("Too many swaps queued\n");
@@ -688,12 +684,12 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
if (vbl_swap->flip)
swap.sequence++;
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+ DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
sizeof(swap));
@@ -716,11 +712,11 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- spin_lock_init(&dev_priv->swaps_lock);
+ DRM_SPININIT(&dev_priv->swaps_lock, "swap");
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
- spin_lock_init(&dev_priv->user_irq_lock);
+ DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
i915_enable_interrupt(dev);