author     Dave Airlie <airlied@redhat.com>   2008-10-16 10:50:31 +1000
committer  Dave Airlie <airlied@redhat.com>   2008-10-16 10:50:31 +1000
commit     11320fd6b106c1255f3fad0860cb4da71697b46a
tree       bc8d822776f0de14c268eb854b237704f2f6075b
parent     fc33686ef044a4a59d48da2a648a0c2d0a1a7fd6
drm: add discardable flag.
This discards the memory contents of marked buffers on suspend/resume, in the hope that the upper layers know something we don't.
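The flag is meant to go into a buffer object's flag mask when the buffer is created, so the eviction path patched below knows its contents need not be preserved. A minimal sketch of that intent follows; DRM_BO_FLAG_DISCARDABLE is the bit added by this patch, the other DRM_BO_FLAG_* masks already exist in drm_objects.h, and the commented-out create call is only illustrative of where the mask would be passed (its exact signature is not part of this diff):

/*
 * Sketch only: build a flag mask for a throwaway scratch buffer whose
 * contents may be dropped on suspend/resume instead of being migrated.
 */
uint64_t mask = DRM_BO_FLAG_READ |		/* pre-existing flags */
		DRM_BO_FLAG_WRITE |
		DRM_BO_FLAG_MEM_VRAM |
		DRM_BO_FLAG_DISCARDABLE;	/* new: contents are expendable */

/* The mask would then be handed to the usual buffer creation path,
 * e.g. something along the lines of:
 *	drm_buffer_object_create(dev, size, drm_bo_type_kernel,
 *				 mask, 0, 0, 0, &scratch_bo);
 * (illustrative call only; see drm_bo.c for the real entry point).
 */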
-rw-r--r--  linux-core/drm_bo.c      | 13 ++++++++++---
-rw-r--r--  linux-core/drm_objects.h |  6 ++++++
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 94a81559..93df229f 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -2098,22 +2098,29 @@ void drm_bo_evict_mm(struct drm_device *dev, int mem_type, int no_wait)
 	struct list_head *lru;
 	int ret;
 	/* evict all buffers on the LRU - won't evict pinned buffers */
-
+
+	drm_mm_dump(&man->manager);
 	mutex_lock(&dev->struct_mutex);
 	do {
 		lru = &man->lru;
-		if (lru->next == lru) {
+redo:
+		if (lru->next == &man->lru) {
 			DRM_ERROR("lru empty\n");
 			break;
 		}
 		entry = list_entry(lru->next, struct drm_buffer_object, lru);
+
+		if (entry->mem.flags & DRM_BO_FLAG_DISCARDABLE) {
+			lru = lru->next;
+			goto redo;
+		}
+
 		atomic_inc(&entry->usage);
 		mutex_unlock(&dev->struct_mutex);
 		mutex_lock(&entry->mutex);
-		DRM_ERROR("Evicting %p %d\n", entry, entry->num_pages);
 		ret = drm_bo_evict(entry, mem_type, no_wait);
 		mutex_unlock(&entry->mutex);
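The practical effect shows up at suspend time: a driver that flushes a memory type with drm_bo_evict_mm() (whose signature appears in the hunk header above) will now walk past discardable buffers instead of migrating them, losing their contents. A hedged sketch of such a caller, assuming a hypothetical my_driver_suspend() wrapper and the pre-existing DRM_BO_MEM_VRAM memory type:

/* Hypothetical suspend hook: only drm_bo_evict_mm() and the
 * DRM_BO_MEM_VRAM memory type come from the existing code;
 * the wrapper itself is illustrative. */
static int my_driver_suspend(struct drm_device *dev)
{
	/* Push everything movable out of VRAM.  Buffers flagged
	 * DRM_BO_FLAG_DISCARDABLE are skipped by the loop patched
	 * above, so their contents are lost and must be regenerated
	 * by the upper layers on resume. */
	drm_bo_evict_mm(dev, DRM_BO_MEM_VRAM, 0);
	return 0;
}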
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index acb10f96..0c8ffe92 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -117,6 +117,12 @@ struct drm_fence_arg {
  */
 #define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
+/*
+ * Mask: if set, note that the buffer contents are discardable
+ * Flags: if set, the buffer contents are discardable on migration
+ */
+#define DRM_BO_FLAG_DISCARDABLE (1ULL << 9)
+
 /* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
  * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
  * with unsnooped PTEs instead of snooped, by using chipset-specific cache
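Because the new bit shares the 64-bit flag space with the existing DRM_BO_FLAG_* definitions, any code that already has a buffer's mem.flags can test it the same way the eviction loop does. A small illustrative helper (the name is hypothetical; only the flag test mirrors the patched code):

/* Illustrative helper: may this buffer's contents be thrown away
 * rather than preserved across a move or suspend? */
static inline int drm_bo_is_discardable(const struct drm_buffer_object *bo)
{
	return (bo->mem.flags & DRM_BO_FLAG_DISCARDABLE) != 0;
}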