author     Keith Packard <keithp@keithp.com>    2008-05-06 14:43:49 -0700
committer  Keith Packard <keithp@keithp.com>    2008-05-06 14:43:49 -0700
commit     631e86c5c4ad9b2cdd40749ea3b351204a362c80 (patch)
tree       d7c7aff2bafaca4a86ea09f64ff7c7c539dbfd0f
parent     8551bfc6dba03dcd9d182b2099a0906153ecfa01 (diff)
Start coding up memory domains
-rw-r--r--  linux-core/drmP.h       | 24
-rw-r--r--  linux-core/drm_gem.c    | 38
-rw-r--r--  shared-core/drm.h       | 29
-rw-r--r--  shared-core/i915_drm.h  | 27
4 files changed, 110 insertions(+), 8 deletions(-)
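
The model behind the patch: each GEM object carries a mask of read domains and at most one write domain, where each domain stands for a cache (CPU, render, sampler, ...) that can hold data belonging to the object. When an object moves between domains, the old write domain must be flushed and any newly joined read domains invalidated. As a rough illustration (the domain names come from the i915 header below; the scenario is hypothetical):

/* An object last drawn to by the 3D engine, now needed as a texture:
 *   before: read_domains = RENDER,  write_domain = RENDER
 *   after:  read_domains = SAMPLER, write_domain = 0
 * transition work: flush the render cache, invalidate the sampler cache. */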
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index ffeafc18..cdeecc30 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -643,6 +643,15 @@ struct drm_gem_object {
*/
int name;
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated.
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
void *driver_private;
};
@@ -942,6 +951,8 @@ struct drm_device {
spinlock_t object_name_lock;
struct idr object_name_idr;
atomic_t object_count;
+ uint32_t invalidate_domains; /* domains pending invalidation */
+ uint32_t flush_domains; /* domains pending flush */
/*@} */
};
@@ -1321,6 +1332,7 @@ static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
return block->mm;
}
+/* Graphics Execution Manager library functions (drm_gem.c) */
int
drm_gem_init (struct drm_device *dev);
@@ -1330,7 +1342,6 @@ drm_gem_object_free (struct kref *kref);
void
drm_gem_object_handle_free (struct kref *kref);
-/* Graphics Execution Manager library functions (drm_gem.c) */
static inline void drm_gem_object_reference(struct drm_gem_object *obj)
{
kref_get(&obj->refcount);
@@ -1385,6 +1396,17 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
+/*
+ * Given the new read/write domains for an object,
+ * compute the invalidate/flush domains for the whole device.
+ */
+int drm_gem_object_set_domain (struct drm_gem_object *object,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
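
For orientation, a sketch of how a kernel-side caller might use the new helper before touching an object's backing pages with the CPU; everything other than drm_gem_object_set_domain() and DRM_GEM_DOMAIN_CPU is hypothetical:

/* Hypothetical caller: bring an object into the CPU domain for a read. */
static int example_begin_cpu_read(struct drm_gem_object *obj)
{
	int ret;

	/* The CPU joins the read domains; no write domain is needed. */
	ret = drm_gem_object_set_domain(obj, DRM_GEM_DOMAIN_CPU, 0);
	if (ret)
		return ret;

	/* Non-CPU flush/invalidate work was only accumulated into
	 * obj->dev->invalidate_domains / flush_domains; the caller is
	 * expected to get it emitted to the hardware before the access.
	 * That emission step is not implemented by this patch. */
	return 0;
}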
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
index 80d5a350..929c008f 100644
--- a/linux-core/drm_gem.c
+++ b/linux-core/drm_gem.c
@@ -539,3 +539,41 @@ drm_gem_object_handle_free (struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations.
+ */
+int drm_gem_object_set_domain (struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ uint32_t invalidate_domains = 0;
+ uint32_t flush_domains = 0;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain.
+ */
+ if (obj->write_domain && obj->write_domain != read_domains)
+ {
+ flush_domains |= obj->write_domain;
+ invalidate_domains |= read_domains & ~obj->write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= read_domains & ~obj->read_domains;
+ obj->write_domain = write_domain;
+ obj->read_domains = read_domains;
+ if ((flush_domains | invalidate_domains) & DRM_GEM_DOMAIN_CPU)
+ drm_gem_object_clflush (obj);
+ dev->invalidate_domains |= invalidate_domains & ~DRM_GEM_DOMAIN_CPU;
+ dev->flush_domains |= flush_domains & ~DRM_GEM_DOMAIN_CPU;
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_set_domain);
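
To make the transition rules concrete, consider an object last written through the render cache that is now to be sampled as a texture (domain values from the i915 header below; the trace is illustrative):

/*
 * before:  obj->write_domain = DRM_GEM_DOMAIN_I915_RENDER  (0x2)
 *          obj->read_domains = DRM_GEM_DOMAIN_I915_RENDER
 *
 * call:    drm_gem_object_set_domain(obj, DRM_GEM_DOMAIN_I915_SAMPLER, 0);
 *
 * effect:  write_domain (0x2) != read_domains argument (0x4), so
 *          flush_domains      |= 0x2  (render cache holds dirty data)
 *          invalidate_domains |= 0x4  (sampler cache may hold stale data)
 *
 * Neither bit is DRM_GEM_DOMAIN_CPU, so no clflush happens; both masks
 * are ORed into dev->flush_domains / dev->invalidate_domains to be
 * emitted later, batched with other set_domain calls.
 */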
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 0e8da7b9..f1430f59 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -1042,6 +1042,19 @@ struct drm_gem_open {
uint64_t size;
};
+struct drm_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+};
+
+#define DRM_GEM_DOMAIN_CPU 0x00000001
+
/**
* \name Ioctls Definitions
*/
@@ -1062,13 +1075,6 @@ struct drm_gem_open {
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
-#define DRM_IOCTL_GEM_ALLOC DRM_IOWR(0x09, struct drm_gem_alloc)
-#define DRM_IOCTL_GEM_UNREFERENCE DRM_IOW(0x0a, struct drm_gem_unreference)
-#define DRM_IOCTL_GEM_PREAD DRM_IOW(0x0b, struct drm_gem_pread)
-#define DRM_IOCTL_GEM_PWRITE DRM_IOW(0x0c, struct drm_gem_pwrite)
-#define DRM_IOCTL_GEM_MMAP DRM_IOWR(0x0d, struct drm_gem_mmap)
-#define DRM_IOCTL_GEM_NAME DRM_IOWR(0x0e, struct drm_gem_name)
-#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0f, struct drm_gem_open)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
@@ -1117,6 +1123,15 @@ struct drm_gem_open {
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+#define DRM_IOCTL_GEM_ALLOC DRM_IOWR(0x09, struct drm_gem_alloc)
+#define DRM_IOCTL_GEM_UNREFERENCE DRM_IOW (0x0a, struct drm_gem_unreference)
+#define DRM_IOCTL_GEM_PREAD DRM_IOW (0x0b, struct drm_gem_pread)
+#define DRM_IOCTL_GEM_PWRITE DRM_IOW (0x0c, struct drm_gem_pwrite)
+#define DRM_IOCTL_GEM_MMAP DRM_IOWR(0x0d, struct drm_gem_mmap)
+#define DRM_IOCTL_GEM_NAME DRM_IOWR(0x0e, struct drm_gem_name)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0f, struct drm_gem_open)
+#define DRM_IOCTL_GEM_SET_DOMAIN DRM_IOW (0xb7, struct drm_gem_set_domain)
+
#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
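
From userspace, the new ioctl would be driven roughly as follows. The kernel-side dispatch for DRM_IOCTL_GEM_SET_DOMAIN is not added by this patch, so this is a sketch against the intended interface (the fd and handle plumbing is assumed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Hypothetical: move a GEM object into the CPU domain for read and
 * write before accessing it through a CPU mapping. */
static int example_set_cpu_domain(int fd, uint32_t handle)
{
	struct drm_gem_set_domain sd;

	memset(&sd, 0, sizeof(sd));
	sd.handle = handle;	/* from DRM_IOCTL_GEM_ALLOC or GEM_OPEN */
	sd.read_domains = DRM_GEM_DOMAIN_CPU;
	sd.write_domain = DRM_GEM_DOMAIN_CPU;

	return ioctl(fd, DRM_IOCTL_GEM_SET_DOMAIN, &sd);
}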
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 0c64e866..302fc646 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -445,6 +445,28 @@ struct drm_i915_gem_relocation_entry {
uint64_t presumed_offset;
};
+/**
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ *
+ * STOLEN is a domain for the stolen memory portion of the
+ * address space; those pages are accessible only through the
+ * GTT and, hence, look a lot like VRAM on a discrete card.
+ * We'll allow programs to move objects into stolen memory
+ * mostly as a way to demonstrate the VRAM capabilities of this
+ * API.
+ */
+
+/* 0x00000001 is DRM_GEM_DOMAIN_CPU */
+#define DRM_GEM_DOMAIN_I915_RENDER 0x00000002 /* Render cache, used by 2D and 3D drawing */
+#define DRM_GEM_DOMAIN_I915_SAMPLER 0x00000004 /* Sampler cache, used by texture engine */
+#define DRM_GEM_DOMAIN_I915_COMMAND 0x00000008 /* Command queue, used to load batch buffers */
+#define DRM_GEM_DOMAIN_I915_INSTRUCTION 0x00000010 /* Instruction cache, used by shader programs */
+#define DRM_GEM_DOMAIN_I915_STOLEN 0x00000020 /* Stolen memory, needed by some objects */
+
struct drm_i915_gem_validate_entry {
/**
* User's handle for a buffer to be bound into the GTT for this
@@ -458,6 +480,11 @@ struct drm_i915_gem_validate_entry {
/** Required alignment in graphics aperture */
uint64_t alignment;
+
+ /** Memory domains used in this execbuffer run */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
/**
* Returned value of the updated offset of the buffer, for future
* presumed_offset writes.
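
The new fields let an execbuffer client declare, per buffer, which domains the batch will use, so the kernel can schedule the right flushes and invalidations at validate time. A hedged sketch for two common cases (only the fields added here are set; the rest of the entry setup, and the helper itself, are hypothetical):

/* Hypothetical helper: fill in the domain fields of a validate entry. */
static void example_fill_domains(struct drm_i915_gem_validate_entry *entry,
				 int is_render_target)
{
	if (is_render_target) {
		/* Render target: read and written through the render cache. */
		entry->read_domains = DRM_GEM_DOMAIN_I915_RENDER;
		entry->write_domain = DRM_GEM_DOMAIN_I915_RENDER;
	} else {
		/* Texture source: read through the sampler cache only. */
		entry->read_domains = DRM_GEM_DOMAIN_I915_SAMPLER;
		entry->write_domain = 0;
	}
}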