author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>    2006-09-12 12:01:00 +0200
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>    2006-09-12 12:01:00 +0200
commit     191e284709ee792a32124e96e43d5876406b93dc (patch)
tree       e6fdf405683aab861f1daa82a403062681d6875c
parent     99acb7936660843090ea8a9f22d2d50d9433e0de (diff)
More bugfixes.
Disable the i915 IRQ turnoff for now since it seems to be causing problems.
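
Note on the libdrm API change in this patch: drmFenceSignaled() now takes the fence type to test and reports the result through an out parameter, only flushing through the kernel when the cached per-fence state is not sufficient. Below is a minimal caller-side sketch of the updated interface; the fd, fence and type arguments are placeholders supplied by the caller and error handling is trimmed, so treat it as an illustration rather than driver code.

    /* Poll a fence with the reworked drmFenceSignaled(), falling back to a
     * lazy drmFenceWait() when it is not yet signaled.  All inputs are
     * assumed to come from the caller; errors are simply propagated. */
    #include "xf86drm.h"

    static int poll_or_wait(int fd, drmFence *fence, unsigned fence_type)
    {
        int signaled = 0;
        int ret;

        /* New signature: pass the type mask to test and receive the
         * result through the out parameter. */
        ret = drmFenceSignaled(fd, fence, fence_type, &signaled);
        if (ret)
            return ret;

        if (!signaled)
            ret = drmFenceWait(fd, fence, fence_type, 1 /* lazy */,
                               0 /* honour signals */);
        return ret;
    }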
-rw-r--r--  libdrm/xf86drm.c        | 70
-rw-r--r--  libdrm/xf86drm.h        |  4
-rw-r--r--  libdrm/xf86mm.h         |  3
-rw-r--r--  linux-core/drmP.h       |  3
-rw-r--r--  linux-core/drm_bo.c     | 26
-rw-r--r--  linux-core/drm_fence.c  |  7
-rw-r--r--  linux-core/drm_lock.c   |  2
-rw-r--r--  linux-core/drm_ttm.c    | 16
-rw-r--r--  linux-core/drm_vm.c     |  9
-rw-r--r--  shared-core/drm.h       |  1
-rw-r--r--  shared-core/i915_drm.h  |  8
-rw-r--r--  shared-core/i915_irq.c  |  8
12 files changed, 115 insertions(+), 42 deletions(-)
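
The memory manager initialization also changes: drmMMInit() gains a max_locked_size argument, given in bytes and converted to a page count with getpagesize() before it reaches the kernel, which now accounts locked TTM pages against that limit. A hedged sketch with made-up sizes follows; the real offsets and sizes depend on the driver's memory layout.

    /* Illustrative only: the offset/size values below are placeholders,
     * not values any real driver uses.  max_locked caps how much memory
     * the kernel will allow to be locked for TTM pages. */
    #include "xf86mm.h"

    static int init_memory_manager(int fd)
    {
        unsigned long vram_p_offset = 0, vram_p_size = 8192;
        unsigned long tt_p_offset   = 0, tt_p_size   = 16384;
        unsigned long max_locked    = 64UL * 1024 * 1024;  /* 64 MiB */

        return drmMMInit(fd, vram_p_offset, vram_p_size,
                         tt_p_offset, tt_p_size, max_locked);
    }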
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 298b812a..2ea6656f 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -2256,6 +2256,7 @@ int drmFenceCreate(int fd, int shareable, int class,unsigned type,
fence->handle = arg.handle;
fence->class = arg.class;
fence->type = arg.type;
+ fence->flags = arg.flags;
fence->signaled = 0;
return 0;
}
@@ -2271,6 +2272,7 @@ int drmFenceBuffers(int fd, int shareable, drmFence *fence)
fence->handle = arg.handle;
fence->class = arg.class;
fence->type = arg.type;
+ fence->flags = arg.flags;
fence->signaled = 0;
return 0;
}
@@ -2297,6 +2299,7 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence)
fence->handle = arg.handle;
fence->class = arg.class;
fence->type = arg.type;
+ fence->flags = 0;
fence->signaled = arg.signaled;
return 0;
}
@@ -2327,20 +2330,41 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
return 0;
}
-int drmFenceSignaled(int fd, drmFence *fence)
+int drmFenceUpdate(int fd, drmFence *fence)
{
- drm_fence_arg_t arg;
-
- arg.handle = fence->handle;
- arg.op = drm_fence_signaled;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
- return -errno;
- fence->class = arg.class;
- fence->type = arg.type;
- fence->signaled = arg.signaled;
+ drm_fence_arg_t arg;
+
+ arg.handle = fence->handle;
+ arg.op = drm_fence_signaled;
+ if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ return -errno;
+ fence->class = arg.class;
+ fence->type = arg.type;
+ fence->signaled = arg.signaled;
+ return 0;
+}
+
+int drmFenceSignaled(int fd, drmFence *fence, unsigned fenceType,
+ int *signaled)
+{
+ int ret;
+
+ if ((fence->flags & DRM_FENCE_FLAG_SHAREABLE) ||
+ ((fenceType & fence->signaled) != fenceType)) {
+
+ ret = drmFenceFlush(fd, fence, fenceType);
+ if (ret)
+ return ret;
+ }
+
+ *signaled = ((fenceType & fence->signaled) == fenceType);
+
return 0;
}
+
+
int drmFenceEmit(int fd, drmFence *fence, unsigned emit_type)
{
drm_fence_arg_t arg;
@@ -2362,6 +2386,12 @@ int drmFenceWait(int fd, drmFence *fence, unsigned flush_type,
drm_fence_arg_t arg;
int ret;
+ if (!(fence->flags & DRM_FENCE_FLAG_SHAREABLE)) {
+ if (flush_type & fence->signaled == flush_type) {
+ return 0;
+ }
+ }
+
arg.handle = fence->handle;
arg.type = flush_type;
arg.flags = (lazy) ? DRM_FENCE_FLAG_WAIT_LAZY : 0;
@@ -2942,8 +2972,6 @@ int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
*newItem = 0;
cur = NULL;
- mask &= ~DRM_BO_MASK_MEM;
-
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
if (node->buf == buf) {
@@ -2961,20 +2989,22 @@ int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
cur->arg0 = flags;
cur->arg1 = mask;
} else {
- unsigned memFlags = cur->arg0 & DRM_BO_MASK_MEM;
+ unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
+ unsigned memFlags = cur->arg0 & flags & memMask;
- if (!(memFlags & flags)) {
+ if (!memFlags) {
drmMsg("Incompatible memory location requests "
"on validate list.\n");
return -EINVAL;
}
- if ((cur->arg1 | mask) & (cur->arg0 ^ flags)) {
+ if ((cur->arg1 | mask) & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
drmMsg("Incompatible buffer flag requests "
" on validate list.\n");
return -EINVAL;
}
cur->arg1 |= mask;
- cur->arg0 = (memFlags & flags) | ((cur->arg0 | flags) & cur->arg1);
+ cur->arg0 = memFlags | ((cur->arg0 | flags) &
+ cur->arg1 & ~DRM_BO_MASK_MEM);
}
return 0;
}
@@ -3116,16 +3146,18 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
}
int drmMMInit(int fd, unsigned long vramPOffset, unsigned long vramPSize,
- unsigned long ttPOffset, unsigned long ttPSize)
+ unsigned long ttPOffset, unsigned long ttPSize,
+ unsigned long max_locked_size)
{
drm_mm_init_arg_t arg;
-
+
arg.req.op = mm_init;
arg.req.vr_p_offset = vramPOffset;
arg.req.vr_p_size = vramPSize;
arg.req.tt_p_offset = ttPOffset;
arg.req.tt_p_size = ttPSize;
-
+ arg.req.max_locked_pages = max_locked_size / getpagesize();
+
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
diff --git a/libdrm/xf86drm.h b/libdrm/xf86drm.h
index be1eeeff..1b136d31 100644
--- a/libdrm/xf86drm.h
+++ b/libdrm/xf86drm.h
@@ -284,6 +284,7 @@ typedef struct _drmFence{
unsigned handle;
int class;
unsigned type;
+ unsigned flags;
unsigned signaled;
} drmFence;
@@ -622,7 +623,8 @@ extern int drmFenceDestroy(int fd, const drmFence *fence);
extern int drmFenceReference(int fd, unsigned handle, drmFence *fence);
extern int drmFenceUnreference(int fd, const drmFence *fence);
extern int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type);
-extern int drmFenceSignaled(int fd, drmFence *fence);
+extern int drmFenceSignaled(int fd, drmFence *fence,
+ unsigned fenceType, int *signaled);
extern int drmFenceWait(int fd, drmFence *fence, unsigned flush_type,
int lazy, int ignore_signals);
extern int drmFenceEmit(int fd, drmFence *fence, unsigned emit_type);
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index 43ea71b8..24e28245 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -185,7 +185,8 @@ extern int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle);
*/
extern int drmMMInit(int fd, unsigned long vramPOffset, unsigned long vramPSize,
- unsigned long ttPOffset, unsigned long ttPSize);
+ unsigned long ttPOffset, unsigned long ttPSize,
+ unsigned long max_locked_size);
extern int drmMMTakedown(int fd);
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index da14bdfd..835b295a 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -790,6 +790,7 @@ typedef struct drm_fence_manager{
typedef struct drm_buffer_manager{
int initialized;
+ drm_file_t *last_to_validate;
int has_vram;
int has_tt;
int use_vram;
@@ -803,6 +804,8 @@ typedef struct drm_buffer_manager{
struct list_head other;
struct work_struct wq;
uint32_t fence_flags;
+ unsigned long max_pages;
+ unsigned long cur_pages;
} drm_buffer_manager_t;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 74722b1b..3a9c2313 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -95,11 +95,11 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
if (bo->fence) {
if (!drm_fence_object_signaled(bo->fence, bo->fence_flags)) {
+
drm_fence_object_flush(dev, bo->fence, bo->fence_flags);
list_add_tail(&bo->ddestroy, &bm->ddestroy);
-
- schedule_delayed_work(&bm->wq, 2);
-
+ schedule_delayed_work(&bm->wq,
+ ((DRM_HZ/100) < 1) ? 1 : DRM_HZ/100);
return;
} else {
drm_fence_usage_deref_locked(dev, bo->fence);
@@ -113,7 +113,7 @@ static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
list_del_init(&bo->head);
if (bo->tt) {
- drm_unbind_ttm_region(bo->ttm_region);
+ drm_unbind_ttm_region(bo->ttm_region);
drm_mm_put_block(&bm->tt_manager, bo->tt);
bo->tt = NULL;
}
@@ -170,7 +170,7 @@ static void drm_bo_delayed_workqueue(void *data)
drm_bo_delayed_delete(dev);
mutex_lock(&dev->struct_mutex);
if (!list_empty(&bm->ddestroy)) {
- schedule_delayed_work(&bm->wq, 2);
+ schedule_delayed_work(&bm->wq, ((DRM_HZ/100) < 1) ? 1 : DRM_HZ/100);
}
mutex_unlock(&dev->struct_mutex);
}
@@ -822,6 +822,11 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
while (1) {
if (atomic_inc_and_test(&bo->mapped)) {
+ if (no_wait && drm_bo_busy(bo)) {
+ atomic_dec(&bo->mapped);
+ ret = -EBUSY;
+ goto out;
+ }
ret = drm_bo_wait(bo, 0, 0, no_wait);
if (ret) {
atomic_dec(&bo->mapped);
@@ -1174,9 +1179,8 @@ static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo,
bo->ttm_object = to;
ttm = drm_ttm_from_object(to);
ret = drm_create_ttm_region(ttm, bo->buffer_start >> PAGE_SHIFT,
- bo->num_pages, 0,
-
- /* bo->mask & DRM_BO_FLAG_BIND_CACHED,*/
+ bo->num_pages,
+ bo->mask & DRM_BO_FLAG_BIND_CACHED,
&bo->ttm_region);
if (ret) {
drm_ttm_object_deref_unlocked(dev, to);
@@ -1383,6 +1387,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
break;
case drm_bo_validate:
rep.ret = drm_bo_lock_test(dev, filp);
+
if (rep.ret)
break;
rep.ret =
@@ -1571,13 +1576,16 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
bm->initialized = 1;
-
+ bm->cur_pages = 0;
+ bm->max_pages = arg.req.max_locked_pages;
break;
case mm_takedown:
if (drm_bo_clean_mm(dev)) {
DRM_ERROR("Memory manager not clean. "
"Delaying takedown\n");
}
+ DRM_DEBUG("We have %ld still locked pages\n",
+ bm->cur_pages);
break;
default:
DRM_ERROR("Function not implemented yet\n");
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index eaaf7f40..df5db702 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -359,12 +359,11 @@ int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
fence_signaled(dev, fence, mask, 1));
if (time_after_eq(jiffies, _end))
break;
- } while (ret == -EINTR && ignore_signals);
-
+ } while (ret == -EINTR && ignore_signals);
if (time_after_eq(jiffies, _end) && (ret != 0))
ret = -EBUSY;
- return ret;
-
+ if (ret)
+ return ((ret == -EINTR) ? -EAGAIN : ret);
} else {
int signaled;
do {
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index 91fad8bf..69ce2291 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -270,7 +270,7 @@ int drm_lock_free(drm_device_t * dev,
prev = cmpxchg(lock, old, new);
} while (prev != old);
if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
- DRM_ERROR("%d freed heavyweight lock held by %d\n",
+ DRM_DEBUG("%d freed heavyweight lock held by %d\n",
context, _DRM_LOCKING_CONTEXT(old));
return 1;
}
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index a83d6401..8aba36ca 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -251,19 +251,24 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
}
if (ttm->pages) {
+ drm_buffer_manager_t *bm = &ttm->dev->bm;
+ int do_tlbflush = 0;
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages + i;
if (ttm->page_flags &&
(ttm->page_flags[i] & DRM_TTM_PAGE_UNCACHED) &&
*cur_page && !PageHighMem(*cur_page)) {
change_page_attr(*cur_page, 1, PAGE_KERNEL);
+ do_tlbflush = 1;
}
if (*cur_page) {
ClearPageReserved(*cur_page);
__free_page(*cur_page);
+ --bm->cur_pages;
}
}
- global_flush_tlb();
+ if (do_tlbflush)
+ global_flush_tlb();
ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
DRM_MEM_TTM, ttm->pages_vmalloc);
ttm->pages = NULL;
@@ -308,6 +313,7 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
if (!ttm)
return NULL;
+ ttm->dev = dev;
ttm->lhandle = 0;
atomic_set(&ttm->vma_count, 0);
@@ -354,7 +360,6 @@ static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
INIT_LIST_HEAD(&ttm->vma_list->head);
ttm->lhandle = (unsigned long)ttm;
- ttm->dev = dev;
return ttm;
}
@@ -562,6 +567,7 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
drm_ttm_backend_list_t *entry;
drm_ttm_backend_t *be;
int ret, i;
+ drm_buffer_manager_t *bm = &ttm->dev->bm;
if ((page_offset + n_pages) > ttm->num_pages || n_pages == 0) {
DRM_ERROR("Region Doesn't fit ttm\n");
@@ -602,6 +608,11 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
for (i = 0; i < entry->num_pages; ++i) {
cur_page = ttm->pages + (page_offset + i);
if (!*cur_page) {
+ if (bm->cur_pages >= bm->max_pages) {
+ DRM_ERROR("Maximum locked page count exceeded\n");
+ drm_destroy_ttm_region(entry);
+ return -ENOMEM;
+ }
*cur_page = alloc_page(GFP_KERNEL);
if (!*cur_page) {
DRM_ERROR("Page allocation failed\n");
@@ -609,6 +620,7 @@ int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
return -ENOMEM;
}
SetPageReserved(*cur_page);
+ ++bm->cur_pages;
}
}
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 69391058..e7b808cc 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -253,6 +253,7 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
drm_ttm_t *ttm;
pgprot_t default_prot;
uint32_t page_flags;
+ drm_buffer_manager_t *bm;
if (address > vma->vm_end)
return NOPAGE_SIGBUS; /* Disallow mremap */
@@ -261,12 +262,18 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
map = (drm_map_t *) entry->map;
ttm = (drm_ttm_t *) map->offset;
+ bm = &ttm->dev->bm;
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
page_flags = ttm->page_flags[page_offset];
if (!page) {
+ if (bm->cur_pages >= bm->max_pages) {
+ DRM_ERROR("Maximum locked page count exceeded\n");
+ return NOPAGE_OOM;
+ }
+ ++bm->cur_pages;
page = ttm->pages[page_offset] =
alloc_page(GFP_KERNEL);
SetPageReserved(page);
@@ -274,8 +281,6 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
if (!page)
return NOPAGE_OOM;
- get_page(page);
-
default_prot = vm_get_page_prot(vma->vm_flags);
BUG_ON(page_flags & DRM_TTM_PAGE_UNCACHED);
diff --git a/shared-core/drm.h b/shared-core/drm.h
index bed55173..d10b713b 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -808,6 +808,7 @@ typedef union drm_mm_init_arg{
drm_u64_t vr_p_size;
drm_u64_t tt_p_offset;
drm_u64_t tt_p_size;
+ drm_u64_t max_locked_pages;
} req;
struct {
drm_handle_t mm_sarea;
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index fcffb25c..1a79d403 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -106,6 +106,14 @@ typedef struct _drm_i915_sarea {
unsigned int rotated2_tiled;
} drm_i915_sarea_t;
+/* Driver specific fence types and classes.
+ */
+
+/* The only fence class we support */
+#define DRM_I915_FENCE_CLASS_ACCEL 0
+/* Fence type that guarantees read-write flush */
+#define DRM_I915_FENCE_TYPE_RW 2
+
/* Flags for perf_boxes
*/
#define I915_BOX_RING_EMPTY 0x1
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 4a1b2987..8f3e79de 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -47,8 +47,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
+#if 0
DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
-
+#endif
if (temp == 0)
return IRQ_NONE;
@@ -104,7 +105,7 @@ int i915_emit_irq(drm_device_t * dev)
void i915_user_irq_on(drm_i915_private_t *dev_priv)
{
-
+ return;
spin_lock(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
dev_priv->irq_enable_reg |= USER_INT_FLAG;
@@ -116,6 +117,7 @@ void i915_user_irq_on(drm_i915_private_t *dev_priv)
void i915_user_irq_off(drm_i915_private_t *dev_priv)
{
+ return;
spin_lock(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
@@ -239,7 +241,7 @@ static int i915_enable_interrupt (drm_device_t *dev)
dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED;
dev_priv->user_irq_refcount = 0;
dev_priv->irq_enable_reg = flag;
- I915_WRITE16(I915REG_INT_ENABLE_R, flag);
+ I915_WRITE16(I915REG_INT_ENABLE_R, flag | USER_INT_FLAG);
dev_priv->irq_enabled = 1;
return 0;
}