author     Keith Packard <keithp@keithp.com>    2008-08-05 14:44:53 -0700
committer  Keith Packard <keithp@keithp.com>    2008-08-05 14:44:53 -0700
commit     ceb3d5e3834452f9d54f974b8066f90168467443 (patch)
tree       fc04302cf176bf9fe059c2ab5c68c9ec70f3f6db
parent     8e41ce17b4ab72f526cc6e9acd75c3fa81a60433 (diff)
[gem-intel] Don't clear write_domain until flush completes
In i915_gem_object_wait_rendering, if the object's write domain is being written by the GPU, the appropriate flushing commands are written to the device and an additional request is queued to mark that flush. Finally, the function blocks on that new request.

The bug was that the write_domain in the object was cleared before the function blocked. If the wait is interrupted by a signal, the flushing commands may still be pending. With the write_domain information already cleared, the restarted syscall drops right through the write_domain test, and so the function will not block at all. Oops.

Fixed by simply moving the write_domain clear until after the wait_request succeeds. Note that the restarted system call will generate an additional flush sequence and request, but that should be 'harmless', aside from a slight performance impact. Someday we'll track flushing more accurately and clear write_domains more efficiently, but for now, this should suffice.

This bug was discovered during 2D GEM development by running x11perf -copypixwin500 and noticing that the window got cleared accidentally.
-rw-r--r--  linux-core/i915_gem.c  22
1 files changed, 17 insertions, 5 deletions
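For orientation before reading the diff, here is a condensed sketch of the fixed ordering in i915_gem_object_wait_rendering(). It is not the literal driver code: the i915_add_request() arguments, locking and debug output are elided, and the "rendering queued" guard is abbreviated here as obj_priv->active. The point it illustrates is where the write_domain clear now sits relative to the blocking wait.

/*
 * Condensed sketch of the fixed flow; see the actual change in the diff below.
 */
static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	uint32_t write_domain;
	int ret;

	/* Writes pending in a GPU domain?  Emit the flush commands and queue
	 * a request that marks when that flush has completed.
	 */
	write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT);
	if (write_domain) {
		i915_gem_flush(dev, 0, write_domain);
		i915_gem_object_move_to_active(obj);
		obj_priv->last_rendering_seqno = i915_add_request(dev, /* ... */);
		/* write_domain is intentionally no longer cleared here. */
	}

	/* Block until that request, and therefore the flush, has completed. */
	if (obj_priv->active) {
		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
		if (ret != 0)
			return ret;	/* e.g. -ERESTARTSYS: domain info preserved */
		if (write_domain)
			obj->write_domain = 0;	/* only now is the flush known done */
	}
	return 0;
}

On a restart, write_domain is still set, so the function flushes again and blocks; that extra flush is the 'harmless' cost mentioned above.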
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index 70f11515..acded2e8 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -381,6 +381,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 
 	mutex_lock(&dev->struct_mutex);
+#if WATCH_BUF
+	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+		 obj, obj->size, args->read_domains, args->write_domain);
+#endif
 	ret = i915_gem_set_domain(obj, file_priv,
 				  args->read_domains, args->write_domain);
 	drm_gem_object_unreference(obj);
@@ -411,8 +415,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	}
 
 #if WATCH_BUF
-	DRM_INFO("%s: sw_finish %d (%p)\n",
-		 __func__, args->handle, obj);
+	DRM_INFO("%s: sw_finish %d (%p %d)\n",
+		 __func__, args->handle, obj, obj->size);
 #endif
 
 	obj_priv = obj->driver_private;
@@ -853,18 +857,18 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int ret;
+	uint32_t write_domain;
 
 	/* If there are writes queued to the buffer, flush and
 	 * create a new seqno to wait for.
 	 */
-	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-		uint32_t write_domain = obj->write_domain;
+	write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT);
+	if (write_domain) {
 #if WATCH_BUF
 		DRM_INFO("%s: flushing object %p from write domain %08x\n",
 			 __func__, obj, write_domain);
 #endif
 		i915_gem_flush(dev, 0, write_domain);
-		obj->write_domain = 0;
 
 		i915_gem_object_move_to_active(obj);
 		obj_priv->last_rendering_seqno = i915_add_request(dev,
@@ -874,6 +878,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
#endif
}
+
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
@@ -885,6 +890,13 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
 		if (ret != 0)
 			return ret;
+		if (write_domain) {
+#if WATCH_BUF
+			DRM_INFO("%s: flushed object %p from write domain %08x\n",
+				 __func__, obj, write_domain);
+#endif
+			obj->write_domain = 0;
+		}
 	}
 
 	return 0;
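A note on the restart path this fix protects: i915_wait_request() returning early on a signal surfaces as an interrupted ioctl, which is either restarted by the kernel or retried by user space on EINTR. DRM callers conventionally wrap ioctls in a retry loop of the shape sketched below (libdrm's drmIoctl() helper has this form; the wrapper name here is illustrative). With the write_domain clear deferred, the reissued call finds the GPU write domain still recorded on the object and blocks until the flush has actually landed, instead of returning while the flush is still pending.

#include <errno.h>
#include <sys/ioctl.h>

/* Illustrative user-space retry wrapper: if a signal interrupts the wait
 * inside the kernel, the ioctl is simply reissued.  The fix above makes
 * the reissued call behave correctly, because the pending write domain
 * has not been forgotten.
 */
static int
retry_ioctl(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret;
}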