path: root/linux-core/drm_objects.h
author    Dave Airlie <airlied@redhat.com>  2008-01-25 15:27:53 +1000
committer Dave Airlie <airlied@redhat.com>  2008-01-25 15:27:53 +1000
commit    e7a41d7f5be49241480a20eb733262712e0f8dcb (patch)
tree      801c7eadf769db981dfb8a900a24ef48542958a6 /linux-core/drm_objects.h
parent    fb9ea12438de95a6ac085879e079055eaea3daf8 (diff)
parent    bfdddd218ec3e7ce3f8e765b93af35661a7bf0fd (diff)
Merge remote branch 'origin/master' into modesetting-101
Conflicts:
	linux-core/drm_bo.c
	linux-core/drm_drv.c
	shared-core/drm.h
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
	shared-core/radeon_irq.c
Diffstat (limited to 'linux-core/drm_objects.h')
-rw-r--r--  linux-core/drm_objects.h  127
1 file changed, 110 insertions(+), 17 deletions(-)
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 394a2721..53fad3af 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -262,7 +262,8 @@ struct drm_ttm_backend;
struct drm_ttm_backend_func {
int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
int (*populate) (struct drm_ttm_backend *backend,
- unsigned long num_pages, struct page **pages);
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page);
void (*clear) (struct drm_ttm_backend *backend);
int (*bind) (struct drm_ttm_backend *backend,
struct drm_bo_mem_reg *bo_mem);
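The new dummy_read_page argument hands the backend one shared placeholder page to use for slots the caller never filled in. A minimal sketch of a populate() hook under the new signature; my_backend, its preallocated page array, and the NULL-means-unpopulated convention are illustrative assumptions, not the in-tree backend:

    struct my_backend {
        struct drm_ttm_backend base;
        struct page **pages;        /* assumed preallocated by the driver */
        unsigned long num_pages;
    };

    static int my_populate(struct drm_ttm_backend *backend,
                           unsigned long num_pages, struct page **pages,
                           struct page *dummy_read_page)
    {
        struct my_backend *mb = container_of(backend, struct my_backend, base);
        unsigned long i;

        /* Substitute the shared dummy page for any slot the caller left
         * empty, so GPU reads always hit valid memory. */
        for (i = 0; i < num_pages; i++)
            mb->pages[i] = pages[i] ? pages[i] : dummy_read_page;
        mb->num_pages = num_pages;
        return 0;
    }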
@@ -296,8 +297,10 @@ struct drm_ttm {
};
-extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
-extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
+extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page);
+extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
extern void drm_ttm_unbind(struct drm_ttm *ttm);
extern void drm_ttm_evict(struct drm_ttm *ttm);
extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
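With the renames, the object lifecycle reads create/bind/unbind/destroy. A sketch of the calling sequence, error handling trimmed; dev, bo, and dummy_read_page are assumed to come from the surrounding driver code:

    struct drm_ttm *ttm;

    ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
                         0 /* page_flags */, dummy_read_page);
    if (!ttm)
        return -ENOMEM;

    if (drm_ttm_bind(ttm, &bo->mem)) {
        drm_ttm_destroy(ttm);   /* renamed from drm_destroy_ttm, see below */
        return -EINVAL;         /* real code would propagate the error */
    }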
@@ -306,10 +309,8 @@ extern void drm_ttm_cache_flush(void);
extern int drm_ttm_populate(struct drm_ttm *ttm);
extern int drm_ttm_set_user(struct drm_ttm *ttm,
struct task_struct *tsk,
- int write,
unsigned long start,
- unsigned long num_pages,
- struct page *dummy_read_page);
+ unsigned long num_pages);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
@@ -317,7 +318,7 @@ extern int drm_ttm_set_user(struct drm_ttm *ttm,
* Otherwise it is called when the last vma exits.
*/
-extern int drm_destroy_ttm(struct drm_ttm *ttm);
+extern int drm_ttm_destroy(struct drm_ttm *ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
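DRM_FLAG_MASKED copies the bits selected by _mask from _new into _old and leaves the remaining bits of _old untouched; a quick worked case:

    uint64_t old_flags = 0xF0, new_flags = 0x0F, mask = 0x3C;

    DRM_FLAG_MASKED(old_flags, new_flags, mask);
    /* Inside the mask the bits come from new_flags, outside it they keep
     * their old values: (0xF0 & ~0x3C) | (0x0F & 0x3C) == 0xC0 | 0x0C,
     * so old_flags is now 0xCC. */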
@@ -330,14 +331,47 @@ extern int drm_destroy_ttm(struct drm_ttm *ttm);
/*
 * Page flags.
 */
+/*
+ * This ttm should not be cached by the CPU
+ */
#define DRM_TTM_PAGE_UNCACHED (1 << 0)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was
+ */
#define DRM_TTM_PAGE_USED (1 << 1)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was
+ */
#define DRM_TTM_PAGE_BOUND (1 << 2)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was
+ */
#define DRM_TTM_PAGE_PRESENT (1 << 3)
+/*
+ * The array of page pointers was allocated with vmalloc
+ * instead of drm_calloc.
+ */
#define DRM_TTM_PAGE_VMALLOC (1 << 4)
+/*
+ * This ttm is mapped from user space
+ */
#define DRM_TTM_PAGE_USER (1 << 5)
-#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
+/*
+ * This ttm will be written to by the GPU
+ */
+#define DRM_TTM_PAGE_WRITE (1 << 6)
+/*
+ * This ttm was mapped to the GPU, and so the contents may have
+ * been modified
+ */
#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was.
+ */
#define DRM_TTM_PAGE_USER_DMA (1 << 8)
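A sketch of how these bits combine in practice; the page_flags field and the point at which DRM_TTM_PAGE_USER_DIRTY gets set are assumptions for illustration, not a quote of the in-tree fault path:

    /* A user-space ttm that the GPU may write is marked dirty once it
     * has been bound, so the unmap path treats the user pages as
     * modified and writes them back. */
    if ((ttm->page_flags & (DRM_TTM_PAGE_USER | DRM_TTM_PAGE_WRITE)) ==
        (DRM_TTM_PAGE_USER | DRM_TTM_PAGE_WRITE))
        ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;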
/***************************************************
@@ -350,16 +384,50 @@ struct drm_bo_mem_reg {
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
+ /*
+  * Current buffer status flags, indicating
+  * where the buffer is located and which
+  * access modes are in effect
+  */
uint64_t flags;
- uint64_t mask;
+ /**
+  * These are the flags proposed for
+  * a validate operation. If the
+  * validate succeeds, they'll get moved
+  * into the flags field
+  */
+ uint64_t proposed_flags;
+
uint32_t desired_tile_stride;
uint32_t hw_tile_stride;
};
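A schematic illustration (not the in-tree code) of the hand-off the two fields describe: proposed_flags are staged before the move, and only land in flags once the validate succeeds; driver_move_or_noop is a hypothetical helper:

    static int validate_sketch(struct drm_buffer_object *bo,
                               uint64_t proposed)
    {
        int ret;

        bo->mem.proposed_flags = proposed;
        ret = driver_move_or_noop(bo);  /* hypothetical helper */
        if (ret)
            return ret;                 /* mem.flags stays unchanged */

        bo->mem.flags = bo->mem.proposed_flags;
        return 0;
    }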
enum drm_bo_type {
- drm_bo_type_dc,
+ /*
+  * drm_bo_type_device are 'normal' drm allocations,
+  * pages are allocated from within the kernel automatically
+  * and the objects can be mmap'd from the drm device. Each
+  * drm_bo_type_device object has a unique name which can be
+  * used by other processes to share access to the underlying
+  * buffer.
+  */
+ drm_bo_type_device,
+ /*
+  * drm_bo_type_user are buffers of pages that already exist
+  * in the process address space. They are more limited than
+  * drm_bo_type_device buffers in that they must always
+  * remain cached (as we assume the user pages are mapped cached),
+  * and they are not sharable to other processes through DRM
+  * (although, regular shared memory should still work fine).
+  */
drm_bo_type_user,
- drm_bo_type_kernel, /* for initial kernel allocations */
+ /*
+  * drm_bo_type_kernel are buffers that exist solely for use
+  * within the kernel. The pages cannot be mapped into the
+  * process. One obvious use would be for the ring
+  * buffer where user access would not (ideally) be required.
+  */
+ drm_bo_type_kernel,
};
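Tying the enum to its use: a kernel-only allocation such as a ring buffer would pass drm_bo_type_kernel to drm_buffer_object_create() (declared further down in this header). The size, flags, and alignment here are illustrative values, not requirements:

    struct drm_buffer_object *ring_bo;
    int ret;

    ret = drm_buffer_object_create(dev, 64 * 1024, drm_bo_type_kernel,
                                   DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                                   DRM_BO_FLAG_MEM_VRAM,
                                   0 /* hint */, 1 /* page_alignment */,
                                   0 /* buffer_start */, &ring_bo);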
struct drm_buffer_object {
@@ -476,9 +544,36 @@ struct drm_bo_driver {
int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
int (*init_mem_type) (struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man);
- uint32_t(*evict_mask) (struct drm_buffer_object *bo);
+ /*
+  * evict_flags:
+  *
+  * @bo: the buffer object to be evicted
+  *
+  * Return the bo flags for a buffer which is not mapped to the hardware.
+  * These will be placed in proposed_flags so that when the move is
+  * finished, they'll end up in bo->mem.flags
+  */
+ uint64_t(*evict_flags) (struct drm_buffer_object *bo);
+ /*
+  * move:
+  *
+  * @bo: the buffer to move
+  *
+  * @evict: whether this motion is evicting the buffer from
+  * the graphics address space
+  *
+  * @no_wait: whether this should give up and return -EBUSY
+  * if this move would require sleeping
+  *
+  * @new_mem: the new memory region receiving the buffer
+  *
+  * Move a buffer between two memory regions.
+  */
int (*move) (struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
+ /*
+  * ttm_cache_flush
+  */
void (*ttm_cache_flush)(struct drm_ttm *ttm);
};
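A sketch of how a driver might wire up the renamed hook; mydrv_evict_flags and mydrv_move are hypothetical, and the return value is just one plausible policy ("evicted buffers fall back to cached system memory"):

    static uint64_t mydrv_evict_flags(struct drm_buffer_object *bo)
    {
        /* Unmapped buffers go to cacheable system memory; the core
         * stores this in proposed_flags before starting the move. */
        return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
    }

    static struct drm_bo_driver mydrv_bo_driver = {
        .evict_flags = mydrv_evict_flags,
        .move        = mydrv_move,  /* hypothetical move hook */
    };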
@@ -519,7 +614,7 @@ extern int drm_fence_buffer_objects(struct drm_device *dev,
struct drm_fence_object **used_fence);
extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
- enum drm_bo_type type, uint64_t mask,
+ enum drm_bo_type type, uint64_t flags,
uint32_t hint, uint32_t page_alignment,
unsigned long buffer_start,
struct drm_buffer_object **bo);
@@ -534,9 +629,8 @@ extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
unsigned long p_offset, unsigned long p_size);
extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
- uint32_t fence_class, uint64_t flags,
- uint64_t mask, uint32_t hint,
- int use_old_fence_class,
+ uint64_t flags, uint64_t mask, uint32_t hint,
+ uint32_t fence_class, int use_old_fence_class,
struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep);
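The reordered prototype groups flags/mask/hint ahead of the fence arguments (and drm_bo_do_validate below loses its no_wait parameter). An illustrative call under the new order; the handle, flag, and fence values are assumptions:

    ret = drm_bo_handle_validate(file_priv, handle,
                                 DRM_BO_FLAG_MEM_TT /* flags */,
                                 DRM_BO_MASK_MEM /* mask */,
                                 0 /* hint */,
                                 fence_class, 1 /* use_old_fence_class */,
                                 &rep, NULL);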
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
@@ -545,7 +639,6 @@ extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
- int no_wait,
struct drm_bo_info_rep *rep);
/*