Diffstat (limited to 'libdrm')
-rw-r--r--  libdrm/intel/intel_bufmgr.c      |  81
-rw-r--r--  libdrm/intel/intel_bufmgr.h      | 186
-rw-r--r--  libdrm/intel/intel_bufmgr_fake.c | 421
-rw-r--r--  libdrm/intel/intel_bufmgr_gem.c  | 558
-rw-r--r--  libdrm/intel/intel_bufmgr_priv.h |  70
5 files changed, 832 insertions(+), 484 deletions(-)
diff --git a/libdrm/intel/intel_bufmgr.c b/libdrm/intel/intel_bufmgr.c
index fc7284b5..188eac22 100644
--- a/libdrm/intel/intel_bufmgr.c
+++ b/libdrm/intel/intel_bufmgr.c
@@ -39,26 +39,26 @@
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
-/** @file dri_bufmgr.c
+/** @file intel_bufmgr.c
*
* Convenience functions for buffer management methods.
*/
-dri_bo *
-dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
- unsigned int alignment)
+drm_intel_bo *
+drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
void
-dri_bo_reference(dri_bo *bo)
+drm_intel_bo_reference(drm_intel_bo *bo)
{
bo->bufmgr->bo_reference(bo);
}
void
-dri_bo_unreference(dri_bo *bo)
+drm_intel_bo_unreference(drm_intel_bo *bo)
{
if (bo == NULL)
return;
@@ -67,38 +67,39 @@ dri_bo_unreference(dri_bo *bo)
}
int
-dri_bo_map(dri_bo *buf, int write_enable)
+drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
}
int
-dri_bo_unmap(dri_bo *buf)
+drm_intel_bo_unmap(drm_intel_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
}
int
-dri_bo_subdata(dri_bo *bo, unsigned long offset,
- unsigned long size, const void *data)
+drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
{
int ret;
+
if (bo->bufmgr->bo_subdata)
return bo->bufmgr->bo_subdata(bo, offset, size, data);
if (size == 0 || data == NULL)
return 0;
- ret = dri_bo_map(bo, 1);
+ ret = drm_intel_bo_map(bo, 1);
if (ret)
return ret;
memcpy((unsigned char *)bo->virtual + offset, data, size);
- dri_bo_unmap(bo);
+ drm_intel_bo_unmap(bo);
return 0;
}
int
-dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
- unsigned long size, void *data)
+drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
{
int ret;
if (bo->bufmgr->bo_subdata)
@@ -107,48 +108,48 @@ dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
if (size == 0 || data == NULL)
return 0;
- ret = dri_bo_map(bo, 0);
+ ret = drm_intel_bo_map(bo, 0);
if (ret)
return ret;
memcpy(data, (unsigned char *)bo->virtual + offset, size);
- dri_bo_unmap(bo);
+ drm_intel_bo_unmap(bo);
return 0;
}
void
-dri_bo_wait_rendering(dri_bo *bo)
+drm_intel_bo_wait_rendering(drm_intel_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
}
void
-dri_bufmgr_destroy(dri_bufmgr *bufmgr)
+drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}
int
-dri_bo_exec(dri_bo *bo, int used,
- drm_clip_rect_t *cliprects, int num_cliprects,
- int DR4)
+drm_intel_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4)
{
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}
void
-dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug)
+drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
{
bufmgr->debug = enable_debug;
}
int
-dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count)
+drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
int
-dri_bo_flink(dri_bo *bo, uint32_t *name)
+drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
if (bo->bufmgr->bo_flink)
return bo->bufmgr->bo_flink(bo, name);
@@ -157,17 +158,17 @@ dri_bo_flink(dri_bo *bo, uint32_t *name)
}
int
-dri_bo_emit_reloc(dri_bo *reloc_buf,
- uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target_buf)
+drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
{
- return reloc_buf->bufmgr->bo_emit_reloc(reloc_buf,
- read_domains, write_domain,
- delta, offset, target_buf);
+ return bo->bufmgr->bo_emit_reloc(bo, offset,
+ target_bo, target_offset,
+ read_domains, write_domain);
}
int
-dri_bo_pin(dri_bo *bo, uint32_t alignment)
+drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
if (bo->bufmgr->bo_pin)
return bo->bufmgr->bo_pin(bo, alignment);
@@ -176,7 +177,7 @@ dri_bo_pin(dri_bo *bo, uint32_t alignment)
}
int
-dri_bo_unpin(dri_bo *bo)
+drm_intel_bo_unpin(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_unpin)
return bo->bufmgr->bo_unpin(bo);
@@ -184,11 +185,23 @@ dri_bo_unpin(dri_bo *bo)
return -ENODEV;
}
-int dri_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+ uint32_t stride)
{
if (bo->bufmgr->bo_set_tiling)
- return bo->bufmgr->bo_set_tiling(bo, tiling_mode);
+ return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
+
+ *tiling_mode = I915_TILING_NONE;
+ return 0;
+}
+
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+ uint32_t *swizzle_mode)
+{
+ if (bo->bufmgr->bo_get_tiling)
+ return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
*tiling_mode = I915_TILING_NONE;
+ *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
return 0;
}
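The hunks above reorder drm_intel_bo_emit_reloc so the relocation offset and target offset come first and the domain flags last. A minimal sketch of a caller using the new order; the buffer names, sizes, and the 4-byte batch offset are hypothetical, not values taken from this diff:

    #include "i915_drm.h"      /* I915_GEM_DOMAIN_RENDER */
    #include "intel_bufmgr.h"

    static int emit_example_reloc(drm_intel_bufmgr *bufmgr)
    {
        drm_intel_bo *batch  = drm_intel_bo_alloc(bufmgr, "batch",  4096, 4096);
        drm_intel_bo *target = drm_intel_bo_alloc(bufmgr, "target", 4096, 4096);
        int ret;

        /* New argument order: bo, offset, target_bo, target_offset,
         * read_domains, write_domain. */
        ret = drm_intel_bo_emit_reloc(batch, 4, target, 0,
                                      I915_GEM_DOMAIN_RENDER,
                                      I915_GEM_DOMAIN_RENDER);

        drm_intel_bo_unreference(target);
        drm_intel_bo_unreference(batch);
        return ret;
    }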
diff --git a/libdrm/intel/intel_bufmgr.h b/libdrm/intel/intel_bufmgr.h
index 67dba6a1..1f7f73a4 100644
--- a/libdrm/intel/intel_bufmgr.h
+++ b/libdrm/intel/intel_bufmgr.h
@@ -36,10 +36,10 @@
#include <stdint.h>
-typedef struct _dri_bufmgr dri_bufmgr;
-typedef struct _dri_bo dri_bo;
+typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
+typedef struct _drm_intel_bo drm_intel_bo;
-struct _dri_bo {
+struct _drm_intel_bo {
/**
* Size in bytes of the buffer object.
*
@@ -48,6 +48,13 @@ struct _dri_bo {
*/
unsigned long size;
/**
+ * Alignment requirement for object
+ *
+ * Used for GTT mapping & pinning the object.
+ */
+ unsigned long align;
+
+ /**
* Card virtual address (offset from the beginning of the aperture) for the
* object. Only valid while validated.
*/
@@ -58,74 +65,123 @@ struct _dri_bo {
void *virtual;
/** Buffer manager context associated with this buffer object */
- dri_bufmgr *bufmgr;
+ drm_intel_bufmgr *bufmgr;
+
/**
* MM-specific handle for accessing object
*/
int handle;
};
-dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size,
- unsigned int alignment);
-void dri_bo_reference(dri_bo *bo);
-void dri_bo_unreference(dri_bo *bo);
-int dri_bo_map(dri_bo *buf, int write_enable);
-int dri_bo_unmap(dri_bo *buf);
-
-int dri_bo_subdata(dri_bo *bo, unsigned long offset,
- unsigned long size, const void *data);
-int dri_bo_get_subdata(dri_bo *bo, unsigned long offset,
- unsigned long size, void *data);
-void dri_bo_wait_rendering(dri_bo *bo);
-
-void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug);
-void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
-int dri_bo_exec(dri_bo *bo, int used,
- drm_clip_rect_t *cliprects, int num_cliprects,
- int DR4);
-int dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count);
-
-int dri_bo_emit_reloc(dri_bo *reloc_buf,
- uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target_buf);
-int dri_bo_pin(dri_bo *buf, uint32_t alignment);
-int dri_bo_unpin(dri_bo *buf);
-int dri_bo_set_tiling(dri_bo *buf, uint32_t *tiling_mode);
-int dri_bo_flink(dri_bo *buf, uint32_t *name);
-
-/* intel_bufmgr_gem.c */
-dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size);
-dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
- unsigned int handle);
-void intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr);
-
-/* intel_bufmgr_fake.c */
-dri_bufmgr *intel_bufmgr_fake_init(int fd,
- unsigned long low_offset, void *low_virtual,
- unsigned long size,
- volatile unsigned int *last_dispatch);
-void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr,
- volatile unsigned int *last_dispatch);
-void intel_bufmgr_fake_set_exec_callback(dri_bufmgr *bufmgr,
- int (*exec)(dri_bo *bo,
- unsigned int used,
- void *priv),
- void *priv);
-void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
- unsigned int (*emit)(void *priv),
- void (*wait)(unsigned int fence,
- void *priv),
- void *priv);
-dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
- unsigned long offset, unsigned long size,
- void *virtual);
-void intel_bo_fake_disable_backing_store(dri_bo *bo,
- void (*invalidate_cb)(dri_bo *bo,
- void *ptr),
- void *ptr);
-
-void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr);
-void intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr);
+drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+void drm_intel_bo_reference(drm_intel_bo *bo);
+void drm_intel_bo_unreference(drm_intel_bo *bo);
+int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
+int drm_intel_bo_unmap(drm_intel_bo *bo);
+
+int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
+
+void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
+void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
+int drm_intel_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4);
+int drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count);
+
+int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
+int drm_intel_bo_unpin(drm_intel_bo *bo);
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+ uint32_t stride);
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+ uint32_t *swizzle_mode);
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name);
+
+/* drm_intel_bufmgr_gem.c */
+drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
+drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle);
+void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
+
+/* drm_intel_bufmgr_fake.c */
+drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
+ unsigned long low_offset,
+ void *low_virtual,
+ unsigned long size,
+ volatile unsigned int *last_dispatch);
+void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
+ volatile unsigned int *last_dispatch);
+void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
+ int (*exec)(drm_intel_bo *bo,
+ unsigned int used,
+ void *priv),
+ void *priv);
+void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
+ unsigned int (*emit)(void *priv),
+ void (*wait)(unsigned int fence,
+ void *priv),
+ void *priv);
+drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long offset, unsigned long size,
+ void *virtual);
+void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
+ void (*invalidate_cb)(drm_intel_bo *bo,
+ void *ptr),
+ void *ptr);
+
+void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
+
+/** @{ Compatibility defines to keep old code building despite the symbol rename
+ * from dri_* to drm_intel_*
+ */
+#define dri_bo drm_intel_bo
+#define dri_bufmgr drm_intel_bufmgr
+#define dri_bo_alloc drm_intel_bo_alloc
+#define dri_bo_reference drm_intel_bo_reference
+#define dri_bo_unreference drm_intel_bo_unreference
+#define dri_bo_map drm_intel_bo_map
+#define dri_bo_unmap drm_intel_bo_unmap
+#define dri_bo_subdata drm_intel_bo_subdata
+#define dri_bo_get_subdata drm_intel_bo_get_subdata
+#define dri_bo_wait_rendering drm_intel_bo_wait_rendering
+#define dri_bufmgr_set_debug drm_intel_bufmgr_set_debug
+#define dri_bufmgr_destroy drm_intel_bufmgr_destroy
+#define dri_bo_exec drm_intel_bo_exec
+#define dri_bufmgr_check_aperture_space drm_intel_bufmgr_check_aperture_space
+#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, \
+ reloc_offset, target_bo) \
+ drm_intel_bo_emit_reloc(reloc_bo, reloc_offset, \
+ target_bo, target_offset, \
+ read, write)
+#define dri_bo_pin drm_intel_bo_pin
+#define dri_bo_unpin drm_intel_bo_unpin
+#define dri_bo_get_tiling drm_intel_bo_get_tiling
+#define dri_bo_set_tiling(bo, mode) drm_intel_bo_set_tiling(bo, mode, 0)
+#define dri_bo_flink drm_intel_bo_flink
+#define intel_bufmgr_gem_init drm_intel_bufmgr_gem_init
+#define intel_bo_gem_create_from_name drm_intel_bo_gem_create_from_name
+#define intel_bufmgr_gem_enable_reuse drm_intel_bufmgr_gem_enable_reuse
+#define intel_bufmgr_fake_init drm_intel_bufmgr_fake_init
+#define intel_bufmgr_fake_set_last_dispatch drm_intel_bufmgr_fake_set_last_dispatch
+#define intel_bufmgr_fake_set_exec_callback drm_intel_bufmgr_fake_set_exec_callback
+#define intel_bufmgr_fake_set_fence_callback drm_intel_bufmgr_fake_set_fence_callback
+#define intel_bo_fake_alloc_static drm_intel_bo_fake_alloc_static
+#define intel_bo_fake_disable_backing_store drm_intel_bo_fake_disable_backing_store
+#define intel_bufmgr_fake_contended_lock_take drm_intel_bufmgr_fake_contended_lock_take
+#define intel_bufmgr_fake_evict_all drm_intel_bufmgr_fake_evict_all
+
+/** @} */
#endif /* INTEL_BUFMGR_H */
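The compatibility defines above mean a caller written against the old dri_* names keeps compiling unchanged. A minimal sketch, assuming a hypothetical legacy caller (the function name and tiling choice are illustrative):

    #include "i915_drm.h"      /* I915_TILING_X */
    #include "intel_bufmgr.h"

    /* Legacy code, untouched: dri_bo is #defined to drm_intel_bo, and
     * dri_bo_set_tiling(bo, &mode) expands to
     * drm_intel_bo_set_tiling(bo, &mode, 0), since old callers never
     * passed a stride. */
    static void legacy_set_tiling(dri_bo *bo)
    {
        uint32_t mode = I915_TILING_X;

        dri_bo_set_tiling(bo, &mode);
        /* mode now holds the tiling actually granted (possibly
         * I915_TILING_NONE if the bufmgr lacks tiling support). */
    }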
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
index 4c281467..0e465303 100644
--- a/libdrm/intel/intel_bufmgr_fake.c
+++ b/libdrm/intel/intel_bufmgr_fake.c
@@ -75,7 +75,7 @@
struct fake_buffer_reloc
{
/** Buffer object that the relocation points at. */
- dri_bo *target_buf;
+ drm_intel_bo *target_buf;
/** Offset of the relocation entry within reloc_buf. */
uint32_t offset;
/** Cached value of the offset when we last performed this relocation. */
@@ -106,12 +106,12 @@ struct block {
/** Fence cookie for the block. */
unsigned fence; /* Split to read_fence, write_fence */
- dri_bo *bo;
+ drm_intel_bo *bo;
void *virtual;
};
typedef struct _bufmgr_fake {
- dri_bufmgr bufmgr;
+ drm_intel_bufmgr bufmgr;
pthread_mutex_t lock;
@@ -163,7 +163,7 @@ typedef struct _bufmgr_fake {
* This allows the driver to hook in a replacement for the DRM usage in
* bufmgr_fake.
*/
- int (*exec)(dri_bo *bo, unsigned int used, void *priv);
+ int (*exec)(drm_intel_bo *bo, unsigned int used, void *priv);
void *exec_priv;
/** Driver-supplied argument to driver callbacks */
@@ -176,10 +176,10 @@ typedef struct _bufmgr_fake {
int debug;
int performed_rendering;
-} dri_bufmgr_fake;
+} drm_intel_bufmgr_fake;
-typedef struct _dri_bo_fake {
- dri_bo bo;
+typedef struct _drm_intel_bo_fake {
+ drm_intel_bo bo;
unsigned id; /* debug only */
const char *name;
@@ -214,11 +214,11 @@ typedef struct _dri_bo_fake {
struct block *block;
void *backing_store;
- void (*invalidate_cb)(dri_bo *bo, void *ptr);
+ void (*invalidate_cb)(drm_intel_bo *bo, void *ptr);
void *invalidate_ptr;
-} dri_bo_fake;
+} drm_intel_bo_fake;
-static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
+static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
unsigned int fence_cookie);
#define MAXFENCE 0x7fffffff
@@ -237,13 +237,13 @@ static int FENCE_LTE( unsigned a, unsigned b )
return 0;
}
-void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
- unsigned int (*emit)(void *priv),
- void (*wait)(unsigned int fence,
- void *priv),
- void *priv)
+void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
+ unsigned int (*emit)(void *priv),
+ void (*wait)(unsigned int fence,
+ void *priv),
+ void *priv)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
bufmgr_fake->fence_emit = emit;
bufmgr_fake->fence_wait = wait;
@@ -251,13 +251,15 @@ void intel_bufmgr_fake_set_fence_callback(dri_bufmgr *bufmgr,
}
static unsigned int
-_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
+_fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
{
struct drm_i915_irq_emit ie;
int ret, seq = 1;
- if (bufmgr_fake->fence_emit != NULL)
- return bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
+ if (bufmgr_fake->fence_emit != NULL) {
+ seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
+ return seq;
+ }
ie.irq_seq = &seq;
ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
@@ -268,12 +270,11 @@ _fence_emit_internal(dri_bufmgr_fake *bufmgr_fake)
}
DBG("emit 0x%08x\n", seq);
- bufmgr_fake->last_fence = seq;
- return bufmgr_fake->last_fence;
+ return seq;
}
static void
-_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq)
+_fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
{
struct drm_i915_irq_wait iw;
int hw_seq, busy_count = 0;
@@ -282,6 +283,7 @@ _fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq)
if (bufmgr_fake->fence_wait != NULL) {
bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
+ clear_fenced(bufmgr_fake, seq);
return;
}
@@ -395,7 +397,7 @@ _fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, int seq)
}
static int
-_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
+_fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
/* Slight problem with wrap-around:
*/
@@ -406,10 +408,10 @@ _fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
* Allocate a memory manager block for the buffer.
*/
static int
-alloc_block(dri_bo *bo)
+alloc_block(drm_intel_bo *bo)
{
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- dri_bufmgr_fake *bufmgr_fake= (dri_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake= (drm_intel_bufmgr_fake *)bo->bufmgr;
struct block *block = (struct block *)calloc(sizeof *block, 1);
unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
unsigned int sz;
@@ -442,18 +444,18 @@ alloc_block(dri_bo *bo)
/* Release the card storage associated with buf:
*/
-static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
+static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block)
{
- dri_bo_fake *bo_fake;
+ drm_intel_bo_fake *bo_fake;
DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);
if (!block)
return;
- bo_fake = (dri_bo_fake *)block->bo;
- if (!(bo_fake->flags & BM_NO_BACKING_STORE) && (bo_fake->card_dirty == 1)) {
+ bo_fake = (drm_intel_bo_fake *)block->bo;
+ if (!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)) && (bo_fake->card_dirty == 1)) {
memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
- bo_fake->card_dirty = 1;
+ bo_fake->card_dirty = 0;
bo_fake->dirty = 1;
}
@@ -473,10 +475,10 @@ static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block)
}
static void
-alloc_backing_store(dri_bo *bo)
+alloc_backing_store(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
assert(!bo_fake->backing_store);
assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
@@ -487,9 +489,9 @@ alloc_backing_store(dri_bo *bo)
}
static void
-free_backing_store(dri_bo *bo)
+free_backing_store(drm_intel_bo *bo)
{
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
if (bo_fake->backing_store) {
assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
@@ -499,10 +501,10 @@ free_backing_store(dri_bo *bo)
}
static void
-set_dirty(dri_bo *bo)
+set_dirty(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
@@ -514,14 +516,14 @@ set_dirty(dri_bo *bo)
}
static int
-evict_lru(dri_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
+evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
{
struct block *block, *tmp;
DBG("%s\n", __FUNCTION__);
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
- dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
continue;
@@ -540,14 +542,14 @@ evict_lru(dri_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
}
static int
-evict_mru(dri_bufmgr_fake *bufmgr_fake)
+evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
{
struct block *block, *tmp;
DBG("%s\n", __FUNCTION__);
DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
- dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
continue;
@@ -565,12 +567,13 @@ evict_mru(dri_bufmgr_fake *bufmgr_fake)
/**
* Removes all objects from the fenced list older than the given fence.
*/
-static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
+static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
unsigned int fence_cookie)
{
struct block *block, *tmp;
int ret = 0;
+ bufmgr_fake->last_fence = fence_cookie;
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
assert(block->fenced);
@@ -608,7 +611,7 @@ static int clear_fenced(dri_bufmgr_fake *bufmgr_fake,
return ret;
}
-static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
+static void fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
{
struct block *block, *tmp;
@@ -629,10 +632,10 @@ static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence)
assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
}
-static int evict_and_alloc_block(dri_bo *bo)
+static int evict_and_alloc_block(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
assert(bo_fake->block == NULL);
@@ -699,7 +702,7 @@ static int evict_and_alloc_block(dri_bo *bo)
* Wait for hardware idle by emitting a fence and waiting for it.
*/
static void
-dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
+drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
{
unsigned int cookie;
@@ -714,20 +717,24 @@ dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake)
* the necessary flushing.
*/
static void
-dri_fake_bo_wait_rendering(dri_bo *bo)
+drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-
- pthread_mutex_lock(&bufmgr_fake->lock);
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
- if (bo_fake->block == NULL || !bo_fake->block->fenced) {
- pthread_mutex_unlock(&bufmgr_fake->lock);
+ if (bo_fake->block == NULL || !bo_fake->block->fenced)
return;
- }
_fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+}
+
+static void
+drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ drm_intel_fake_bo_wait_rendering_locked(bo);
pthread_mutex_unlock(&bufmgr_fake->lock);
}
@@ -736,9 +743,9 @@ dri_fake_bo_wait_rendering(dri_bo *bo)
* -- and wait for idle
*/
void
-intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
+drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
struct block *block, *tmp;
pthread_mutex_lock(&bufmgr_fake->lock);
@@ -750,7 +757,7 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
* happening, so we'll need to wait anyway before letting anything get
* put on the card again.
*/
- dri_bufmgr_fake_wait_idle(bufmgr_fake);
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
/* Check that we hadn't released the lock without having fenced the last
* set of buffers.
@@ -766,14 +773,14 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
pthread_mutex_unlock(&bufmgr_fake->lock);
}
-static dri_bo *
-dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
- unsigned long size, unsigned int alignment)
+static drm_intel_bo *
+drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
{
- dri_bufmgr_fake *bufmgr_fake;
- dri_bo_fake *bo_fake;
+ drm_intel_bufmgr_fake *bufmgr_fake;
+ drm_intel_bo_fake *bo_fake;
- bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
assert(size != 0);
@@ -803,15 +810,15 @@ dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name,
return &bo_fake->bo;
}
-dri_bo *
-intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
- unsigned long offset, unsigned long size,
- void *virtual)
+drm_intel_bo *
+drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long offset, unsigned long size,
+ void *virtual)
{
- dri_bufmgr_fake *bufmgr_fake;
- dri_bo_fake *bo_fake;
+ drm_intel_bufmgr_fake *bufmgr_fake;
+ drm_intel_bo_fake *bo_fake;
- bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
assert(size != 0);
@@ -836,10 +843,10 @@ intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
}
static void
-dri_fake_bo_reference(dri_bo *bo)
+drm_intel_fake_bo_reference(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
pthread_mutex_lock(&bufmgr_fake->lock);
bo_fake->refcount++;
@@ -847,18 +854,18 @@ dri_fake_bo_reference(dri_bo *bo)
}
static void
-dri_fake_bo_reference_locked(dri_bo *bo)
+drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
{
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
bo_fake->refcount++;
}
static void
-dri_fake_bo_unreference_locked(dri_bo *bo)
+drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
int i;
if (--bo_fake->refcount == 0) {
@@ -869,7 +876,7 @@ dri_fake_bo_unreference_locked(dri_bo *bo)
free_backing_store(bo);
for (i = 0; i < bo_fake->nr_relocs; i++)
- dri_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);
+ drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);
DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
@@ -879,12 +886,12 @@ dri_fake_bo_unreference_locked(dri_bo *bo)
}
static void
-dri_fake_bo_unreference(dri_bo *bo)
+drm_intel_fake_bo_unreference(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
pthread_mutex_lock(&bufmgr_fake->lock);
- dri_fake_bo_unreference_locked(bo);
+ drm_intel_fake_bo_unreference_locked(bo);
pthread_mutex_unlock(&bufmgr_fake->lock);
}
@@ -892,13 +899,13 @@ dri_fake_bo_unreference(dri_bo *bo)
* Set the buffer as not requiring backing store, and instead get the callback
* invoked whenever it would be set dirty.
*/
-void intel_bo_fake_disable_backing_store(dri_bo *bo,
- void (*invalidate_cb)(dri_bo *bo,
- void *ptr),
- void *ptr)
+void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
+ void (*invalidate_cb)(drm_intel_bo *bo,
+ void *ptr),
+ void *ptr)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
pthread_mutex_lock(&bufmgr_fake->lock);
@@ -927,14 +934,19 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo,
* BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
*/
static int
-dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
+drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
/* Static buffers are always mapped. */
- if (bo_fake->is_static)
+ if (bo_fake->is_static) {
+ if (bo_fake->card_dirty) {
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+ bo_fake->card_dirty = 0;
+ }
return 0;
+ }
/* Allow recursive mapping. Mesa may recursively map buffers with
* nested display loops, and it is used internally in bufmgr_fake
@@ -964,7 +976,7 @@ dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
bo_fake->block->fenced) {
- dri_fake_bo_wait_rendering(bo);
+ drm_intel_fake_bo_wait_rendering_locked(bo);
}
bo->virtual = bo_fake->block->virtual;
@@ -977,6 +989,14 @@ dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
if (bo_fake->backing_store == 0)
alloc_backing_store(bo);
+ if ((bo_fake->card_dirty == 1) && bo_fake->block) {
+ if (bo_fake->block->fenced)
+ drm_intel_fake_bo_wait_rendering_locked(bo);
+
+ memcpy(bo_fake->backing_store, bo_fake->block->virtual, bo_fake->block->bo->size);
+ bo_fake->card_dirty = 0;
+ }
+
bo->virtual = bo_fake->backing_store;
}
}
@@ -985,23 +1005,23 @@ dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
}
static int
-dri_fake_bo_map(dri_bo *bo, int write_enable)
+drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
int ret;
pthread_mutex_lock(&bufmgr_fake->lock);
- ret = dri_fake_bo_map_locked(bo, write_enable);
+ ret = drm_intel_fake_bo_map_locked(bo, write_enable);
pthread_mutex_unlock(&bufmgr_fake->lock);
return ret;
}
static int
-dri_fake_bo_unmap_locked(dri_bo *bo)
+drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
/* Static buffers are always mapped. */
if (bo_fake->is_static)
@@ -1020,30 +1040,28 @@ dri_fake_bo_unmap_locked(dri_bo *bo)
}
static int
-dri_fake_bo_unmap(dri_bo *bo)
+drm_intel_fake_bo_unmap(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
int ret;
pthread_mutex_lock(&bufmgr_fake->lock);
- ret = dri_fake_bo_unmap_locked(bo);
+ ret = drm_intel_fake_bo_unmap_locked(bo);
pthread_mutex_unlock(&bufmgr_fake->lock);
return ret;
}
static void
-dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
+drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
{
struct block *block, *tmp;
- pthread_mutex_lock(&bufmgr_fake->lock);
-
bufmgr_fake->performed_rendering = 0;
/* okay for ever BO that is on the HW kick it off.
seriously not afraid of the POLICE right now */
DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
- dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
block->on_hardware = 0;
free_block(bufmgr_fake, block);
@@ -1052,17 +1070,15 @@ dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
if (!(bo_fake->flags & BM_NO_BACKING_STORE))
bo_fake->dirty = 1;
}
-
- pthread_mutex_unlock(&bufmgr_fake->lock);
}
static int
-dri_fake_bo_validate(dri_bo *bo)
+drm_intel_fake_bo_validate(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
- bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
bo_fake->bo.size / 1024);
@@ -1103,7 +1119,7 @@ dri_fake_bo_validate(dri_bo *bo)
* which we would be tracking when we free it. Waiting for idle is
* a sufficiently large hammer for now.
*/
- dri_bufmgr_fake_wait_idle(bufmgr_fake);
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
/* we may never have mapped this BO so it might not have any backing
* store if this happens it should be rare, but 0 the card memory
@@ -1128,9 +1144,9 @@ dri_fake_bo_validate(dri_bo *bo)
}
static void
-dri_fake_fence_validated(dri_bufmgr *bufmgr)
+drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
unsigned int cookie;
cookie = _fence_emit_internal(bufmgr_fake);
@@ -1140,9 +1156,9 @@ dri_fake_fence_validated(dri_bufmgr *bufmgr)
}
static void
-dri_fake_destroy(dri_bufmgr *bufmgr)
+drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
pthread_mutex_destroy(&bufmgr_fake->lock);
mmDestroy(bufmgr_fake->heap);
@@ -1150,46 +1166,46 @@ dri_fake_destroy(dri_bufmgr *bufmgr)
}
static int
-dri_fake_emit_reloc(dri_bo *reloc_buf,
- uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target_buf)
+drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)reloc_buf->bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
struct fake_buffer_reloc *r;
- dri_bo_fake *reloc_fake = (dri_bo_fake *)reloc_buf;
- dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
+ drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)target_bo;
int i;
pthread_mutex_lock(&bufmgr_fake->lock);
- assert(reloc_buf);
- assert(target_buf);
+ assert(bo);
+ assert(target_bo);
- if (reloc_fake->relocs == NULL) {
- reloc_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) *
- MAX_RELOCS);
+ if (bo_fake->relocs == NULL) {
+ bo_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
}
- r = &reloc_fake->relocs[reloc_fake->nr_relocs++];
-
- assert(reloc_fake->nr_relocs <= MAX_RELOCS);
+ r = &bo_fake->relocs[bo_fake->nr_relocs++];
- dri_fake_bo_reference_locked(target_buf);
+ assert(bo_fake->nr_relocs <= MAX_RELOCS);
- if (!target_fake->is_static)
- reloc_fake->child_size += ALIGN(target_buf->size, target_fake->alignment);
+ drm_intel_fake_bo_reference_locked(target_bo);
- r->target_buf = target_buf;
+ if (!target_fake->is_static) {
+ bo_fake->child_size += ALIGN(target_bo->size, target_fake->alignment);
+ bo_fake->child_size += target_fake->child_size;
+ }
+ r->target_buf = target_bo;
r->offset = offset;
- r->last_target_offset = target_buf->offset;
- r->delta = delta;
+ r->last_target_offset = target_bo->offset;
+ r->delta = target_offset;
r->read_domains = read_domains;
r->write_domain = write_domain;
if (bufmgr_fake->debug) {
/* Check that a conflicting relocation hasn't already been emitted. */
- for (i = 0; i < reloc_fake->nr_relocs - 1; i++) {
- struct fake_buffer_reloc *r2 = &reloc_fake->relocs[i];
+ for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
+ struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];
assert(r->offset != r2->offset);
}
@@ -1205,45 +1221,44 @@ dri_fake_emit_reloc(dri_bo *reloc_buf,
* the combined validation flags for the buffer on this batchbuffer submission.
*/
static void
-dri_fake_calculate_domains(dri_bo *bo)
+drm_intel_fake_calculate_domains(drm_intel_bo *bo)
{
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
int i;
for (i = 0; i < bo_fake->nr_relocs; i++) {
struct fake_buffer_reloc *r = &bo_fake->relocs[i];
- dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+ drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
/* Do the same for the tree of buffers we depend on */
- dri_fake_calculate_domains(r->target_buf);
+ drm_intel_fake_calculate_domains(r->target_buf);
target_fake->read_domains |= r->read_domains;
- if (target_fake->write_domain != 0)
- target_fake->write_domain = r->write_domain;
+ target_fake->write_domain |= r->write_domain;
}
}
static int
-dri_fake_reloc_and_validate_buffer(dri_bo *bo)
+drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
int i, ret;
assert(bo_fake->map_count == 0);
for (i = 0; i < bo_fake->nr_relocs; i++) {
struct fake_buffer_reloc *r = &bo_fake->relocs[i];
- dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+ drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
uint32_t reloc_data;
/* Validate the target buffer if that hasn't been done. */
if (!target_fake->validated) {
- ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
+ ret = drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
if (ret != 0) {
if (bo->virtual != NULL)
- dri_fake_bo_unmap_locked(bo);
+ drm_intel_fake_bo_unmap_locked(bo);
return ret;
}
}
@@ -1253,7 +1268,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
reloc_data = r->target_buf->offset + r->delta;
if (bo->virtual == NULL)
- dri_fake_bo_map_locked(bo, 1);
+ drm_intel_fake_bo_map_locked(bo, 1);
*(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
@@ -1262,34 +1277,33 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
}
if (bo->virtual != NULL)
- dri_fake_bo_unmap_locked(bo);
+ drm_intel_fake_bo_unmap_locked(bo);
if (bo_fake->write_domain != 0) {
if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
if (bo_fake->backing_store == 0)
alloc_backing_store(bo);
-
- bo_fake->card_dirty = 1;
}
+ bo_fake->card_dirty = 1;
bufmgr_fake->performed_rendering = 1;
}
- return dri_fake_bo_validate(bo);
+ return drm_intel_fake_bo_validate(bo);
}
static void
-dri_bo_fake_post_submit(dri_bo *bo)
+drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
int i;
for (i = 0; i < bo_fake->nr_relocs; i++) {
struct fake_buffer_reloc *r = &bo_fake->relocs[i];
- dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf;
+ drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
if (target_fake->validated)
- dri_bo_fake_post_submit(r->target_buf);
+ drm_intel_bo_fake_post_submit(r->target_buf);
DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
bo_fake->name, (uint32_t)bo->offset, r->offset,
@@ -1303,25 +1317,25 @@ dri_bo_fake_post_submit(dri_bo *bo)
}
-void intel_bufmgr_fake_set_exec_callback(dri_bufmgr *bufmgr,
- int (*exec)(dri_bo *bo,
- unsigned int used,
- void *priv),
- void *priv)
+void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
+ int (*exec)(drm_intel_bo *bo,
+ unsigned int used,
+ void *priv),
+ void *priv)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
bufmgr_fake->exec = exec;
bufmgr_fake->exec_priv = priv;
}
static int
-dri_fake_bo_exec(dri_bo *bo, int used,
- drm_clip_rect_t *cliprects, int num_cliprects,
- int DR4)
+drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
- dri_bo_fake *batch_fake = (dri_bo_fake *)bo;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+ drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *)bo;
struct drm_i915_batchbuffer batch;
int ret;
int retry_count = 0;
@@ -1330,17 +1344,17 @@ dri_fake_bo_exec(dri_bo *bo, int used,
bufmgr_fake->performed_rendering = 0;
- dri_fake_calculate_domains(bo);
+ drm_intel_fake_calculate_domains(bo);
batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
/* we've ran out of RAM so blow the whole lot away and retry */
restart:
- ret = dri_fake_reloc_and_validate_buffer(bo);
+ ret = drm_intel_fake_reloc_and_validate_buffer(bo);
if (bufmgr_fake->fail == 1) {
if (retry_count == 0) {
retry_count++;
- dri_fake_kick_all(bufmgr_fake);
+ drm_intel_fake_kick_all_locked(bufmgr_fake);
bufmgr_fake->fail = 0;
goto restart;
} else /* dump out the memory here */
@@ -1351,8 +1365,10 @@ dri_fake_bo_exec(dri_bo *bo, int used,
if (bufmgr_fake->exec != NULL) {
int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
- if (ret != 0)
+ if (ret != 0) {
+ pthread_mutex_unlock(&bufmgr_fake->lock);
return ret;
+ }
} else {
batch.start = bo->offset;
batch.used = used;
@@ -1364,13 +1380,14 @@ dri_fake_bo_exec(dri_bo *bo, int used,
if (drmCommandWrite(bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
sizeof(batch))) {
drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
return -errno;
}
}
- dri_fake_fence_validated(bo->bufmgr);
+ drm_intel_fake_fence_validated(bo->bufmgr);
- dri_bo_fake_post_submit(bo);
+ drm_intel_bo_fake_post_submit(bo);
pthread_mutex_unlock(&bufmgr_fake->lock);
@@ -1385,14 +1402,14 @@ dri_fake_bo_exec(dri_bo *bo, int used,
* a set smaller than the aperture.
*/
static int
-dri_fake_check_aperture_space(dri_bo **bo_array, int count)
+drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo_array[0]->bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo_array[0]->bufmgr;
unsigned int sz = 0;
int i;
for (i = 0; i < count; i++) {
- dri_bo_fake *bo_fake = (dri_bo_fake *)bo_array[i];
+ drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo_array[i];
if (bo_fake == NULL)
continue;
@@ -1421,9 +1438,9 @@ dri_fake_check_aperture_space(dri_bo **bo_array, int count)
* own.
*/
void
-intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
+drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
struct block *block, *tmp;
pthread_mutex_lock(&bufmgr_fake->lock);
@@ -1435,7 +1452,7 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
* happening, so we'll need to wait anyway before letting anything get
* put on the card again.
*/
- dri_bufmgr_fake_wait_idle(bufmgr_fake);
+ drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
/* Check that we hadn't released the lock without having fenced the last
* set of buffers.
@@ -1450,21 +1467,21 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
pthread_mutex_unlock(&bufmgr_fake->lock);
}
-void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr,
+void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
volatile unsigned int *last_dispatch)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}
-dri_bufmgr *
-intel_bufmgr_fake_init(int fd,
+drm_intel_bufmgr *
+drm_intel_bufmgr_fake_init(int fd,
unsigned long low_offset, void *low_virtual,
unsigned long size,
volatile unsigned int *last_dispatch)
{
- dri_bufmgr_fake *bufmgr_fake;
+ drm_intel_bufmgr_fake *bufmgr_fake;
bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
@@ -1484,16 +1501,16 @@ intel_bufmgr_fake_init(int fd,
bufmgr_fake->heap = mmInit(low_offset, size);
/* Hook in methods */
- bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc;
- bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference;
- bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference;
- bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map;
- bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap;
- bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering;
- bufmgr_fake->bufmgr.bo_emit_reloc = dri_fake_emit_reloc;
- bufmgr_fake->bufmgr.destroy = dri_fake_destroy;
- bufmgr_fake->bufmgr.bo_exec = dri_fake_bo_exec;
- bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space;
+ bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
+ bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
+ bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
+ bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
+ bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
+ bufmgr_fake->bufmgr.bo_wait_rendering = drm_intel_fake_bo_wait_rendering;
+ bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
+ bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
+ bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
+ bufmgr_fake->bufmgr.check_aperture_space = drm_intel_fake_check_aperture_space;
bufmgr_fake->bufmgr.debug = 0;
bufmgr_fake->fd = fd;
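The fake bufmgr changes above also tighten fence bookkeeping: _fence_emit_internal no longer caches last_fence itself, clear_fenced records the cookie, and the driver-callback wait path now retires fenced blocks as well. A minimal sketch of wiring in driver fence callbacks, with hypothetical driver internals (the counter stands in for a real IRQ sequence number):

    static unsigned int next_fence;

    static unsigned int my_emit_fence(void *priv)
    {
        (void)priv;
        return ++next_fence;   /* a real driver would emit an IRQ here */
    }

    static void my_wait_fence(unsigned int fence, void *priv)
    {
        (void)fence; (void)priv;
        /* a real driver would block until the hardware sequence passes
         * fence; after this returns, the bufmgr itself retires fenced
         * blocks via clear_fenced(). */
    }

    void hook_driver_fences(drm_intel_bufmgr *bufmgr, void *driver_priv)
    {
        drm_intel_bufmgr_fake_set_fence_callback(bufmgr, my_emit_fence,
                                                 my_wait_fence, driver_priv);
    }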
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index af58ad8f..64d32d38 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -39,6 +39,7 @@
#endif
#include <xf86drm.h>
+#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -47,6 +48,8 @@
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#include "errno.h"
#include "intel_bufmgr.h"
@@ -60,10 +63,10 @@
fprintf(stderr, __VA_ARGS__); \
} while (0)
-typedef struct _dri_bo_gem dri_bo_gem;
+typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
-struct dri_gem_bo_bucket {
- dri_bo_gem *head, **tail;
+struct drm_intel_gem_bo_bucket {
+ drm_intel_bo_gem *head, **tail;
/**
* Limit on the number of entries in this bucket.
*
@@ -77,9 +80,9 @@ struct dri_gem_bo_bucket {
/* Arbitrarily chosen, 16 means that the maximum size we'll cache for reuse
* is 1 << 16 pages, or 256MB.
*/
-#define INTEL_GEM_BO_BUCKETS 16
-typedef struct _dri_bufmgr_gem {
- dri_bufmgr bufmgr;
+#define DRM_INTEL_GEM_BO_BUCKETS 16
+typedef struct _drm_intel_bufmgr_gem {
+ drm_intel_bufmgr bufmgr;
int fd;
@@ -88,16 +91,18 @@ typedef struct _dri_bufmgr_gem {
pthread_mutex_t lock;
struct drm_i915_gem_exec_object *exec_objects;
- dri_bo **exec_bos;
+ drm_intel_bo **exec_bos;
int exec_size;
int exec_count;
/** Array of lists of cached gem objects of power-of-two sizes */
- struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];
-} dri_bufmgr_gem;
+ struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
-struct _dri_bo_gem {
- dri_bo bo;
+ uint64_t gtt_size;
+} drm_intel_bufmgr_gem;
+
+struct _drm_intel_bo_gem {
+ drm_intel_bo bo;
int refcount;
/** Boolean whether the mmap ioctl has been called for this buffer yet. */
@@ -126,17 +131,38 @@ struct _dri_bo_gem {
/** Array passed to the DRM containing relocation information. */
struct drm_i915_gem_relocation_entry *relocs;
/** Array of bos corresponding to relocs[i].target_handle */
- dri_bo **reloc_target_bo;
+ drm_intel_bo **reloc_target_bo;
/** Number of entries in relocs */
int reloc_count;
/** Mapped address for the buffer */
void *virtual;
/** free list */
- dri_bo_gem *next;
+ drm_intel_bo_gem *next;
+
+ /**
+ * Boolean of whether this BO and its children have been included in
+ * the current drm_intel_bufmgr_check_aperture_space() total.
+ */
+ char included_in_check_aperture;
+
+ /**
+ * Boolean of whether this buffer has been used as a relocation
+ * target and had its size accounted for, and thus can't have any
+ * further relocations added to it.
+ */
+ char used_as_reloc_target;
+
+ /**
+ * Size in bytes of this buffer and its relocation descendents.
+ *
+ * Used to avoid costly tree walking in drm_intel_bufmgr_check_aperture in
+ * the common case.
+ */
+ int reloc_tree_size;
};
-static void dri_gem_bo_reference_locked(dri_bo *bo);
+static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);
static int
logbase2(int n)
@@ -152,8 +178,9 @@ logbase2(int n)
return log2;
}
-static struct dri_gem_bo_bucket *
-dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
+static struct drm_intel_gem_bo_bucket *
+drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
+ unsigned long size)
{
int i;
@@ -166,20 +193,20 @@ dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
/* We always allocate in units of pages */
i = ffs(size / 4096) - 1;
- if (i >= INTEL_GEM_BO_BUCKETS)
+ if (i >= DRM_INTEL_GEM_BO_BUCKETS)
return NULL;
return &bufmgr_gem->cache_bucket[i];
}
-static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
+static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
int i, j;
for (i = 0; i < bufmgr_gem->exec_count; i++) {
- dri_bo *bo = bufmgr_gem->exec_bos[i];
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
if (bo_gem->relocs == NULL) {
DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
@@ -187,12 +214,13 @@ static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
}
for (j = 0; j < bo_gem->reloc_count; j++) {
- dri_bo *target_bo = bo_gem->reloc_target_bo[j];
- dri_bo_gem *target_gem = (dri_bo_gem *)target_bo;
+ drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
+ drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *)target_bo;
DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
i,
- bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset,
+ bo_gem->gem_handle, bo_gem->name,
+ (unsigned long long)bo_gem->relocs[j].offset,
target_gem->gem_handle, target_gem->name, target_bo->offset,
bo_gem->relocs[j].delta);
}
@@ -208,10 +236,10 @@ static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
* access flags.
*/
static void
-intel_add_validate_buffer(dri_bo *bo)
+drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int index;
if (bo_gem->validate_index != -1)
@@ -242,7 +270,7 @@ intel_add_validate_buffer(dri_bo *bo)
bufmgr_gem->exec_objects[index].alignment = 0;
bufmgr_gem->exec_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
- dri_gem_bo_reference_locked(bo);
+ drm_intel_gem_bo_reference_locked(bo);
bufmgr_gem->exec_count++;
}
@@ -251,27 +279,28 @@ intel_add_validate_buffer(dri_bo *bo)
sizeof(uint32_t))
static int
-intel_setup_reloc_list(dri_bo *bo)
+drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
sizeof(struct drm_i915_gem_relocation_entry));
- bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *));
+ bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs *
+ sizeof(drm_intel_bo *));
return 0;
}
-static dri_bo *
-dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
- unsigned long size, unsigned int alignment)
+static drm_intel_bo *
+drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
- dri_bo_gem *bo_gem;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+ drm_intel_bo_gem *bo_gem;
unsigned int page_size = getpagesize();
int ret;
- struct dri_gem_bo_bucket *bucket;
+ struct drm_intel_gem_bo_bucket *bucket;
int alloc_from_cache = 0;
unsigned long bo_size;
@@ -279,7 +308,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
bo_size = 1 << logbase2(size);
if (bo_size < page_size)
bo_size = page_size;
- bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
+ bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
/* If we don't have caching at this size, don't actually round the
* allocation up.
@@ -334,6 +363,8 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
bo_gem->name = name;
bo_gem->refcount = 1;
bo_gem->validate_index = -1;
+ bo_gem->reloc_tree_size = bo_gem->bo.size;
+ bo_gem->used_as_reloc_target = 0;
DBG("bo_create: buf %d (%s) %ldb\n",
bo_gem->gem_handle, bo_gem->name, size);
@@ -342,17 +373,17 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
}
/**
- * Returns a dri_bo wrapping the given buffer object handle.
+ * Returns a drm_intel_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
*/
-dri_bo *
-intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
- unsigned int handle)
+drm_intel_bo *
+drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned int handle)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
- dri_bo_gem *bo_gem;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+ drm_intel_bo_gem *bo_gem;
int ret;
struct drm_gem_open open_arg;
@@ -385,10 +416,10 @@ intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
}
static void
-dri_gem_bo_reference(dri_bo *bo)
+drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
pthread_mutex_lock(&bufmgr_gem->lock);
bo_gem->refcount++;
@@ -398,16 +429,24 @@ dri_gem_bo_reference(dri_bo *bo)
static void
dri_gem_bo_reference_locked(dri_bo *bo)
{
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
bo_gem->refcount++;
}
static void
-dri_gem_bo_free(dri_bo *bo)
+drm_intel_gem_bo_reference_locked(drm_intel_bo *bo)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+
+ bo_gem->refcount++;
+}
+
+static void
+drm_intel_gem_bo_free(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_gem_close close;
int ret;
@@ -426,20 +465,20 @@ dri_gem_bo_free(dri_bo *bo)
}
static void
-dri_gem_bo_unreference_locked(dri_bo *bo)
+drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
if (--bo_gem->refcount == 0) {
- struct dri_gem_bo_bucket *bucket;
+ struct drm_intel_gem_bo_bucket *bucket;
if (bo_gem->relocs != NULL) {
int i;
/* Unreference all the target buffers */
for (i = 0; i < bo_gem->reloc_count; i++)
- dri_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
+ drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
free(bo_gem->reloc_target_bo);
free(bo_gem->relocs);
}
@@ -447,7 +486,7 @@ dri_gem_bo_unreference_locked(dri_bo *bo)
DBG("bo_unreference final: %d (%s)\n",
bo_gem->gem_handle, bo_gem->name);
- bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+ bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
if (bucket != NULL &&
(bucket->max_entries == -1 ||
@@ -465,26 +504,26 @@ dri_gem_bo_unreference_locked(dri_bo *bo)
bucket->tail = &bo_gem->next;
bucket->num_entries++;
} else {
- dri_gem_bo_free(bo);
+ drm_intel_gem_bo_free(bo);
}
}
}
static void
-dri_gem_bo_unreference(dri_bo *bo)
+drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
pthread_mutex_lock(&bufmgr_gem->lock);
- dri_gem_bo_unreference_locked(bo);
+ drm_intel_gem_bo_unreference_locked(bo);
pthread_mutex_unlock(&bufmgr_gem->lock);
}
static int
-dri_gem_bo_map(dri_bo *bo, int write_enable)
+drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_set_domain set_domain;
int ret;
@@ -543,11 +582,89 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
return 0;
}
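From the caller's side the map path reduces to the usual bracket: map with the intended access, touch bo->virtual, unmap. A minimal sketch of a CPU write, assuming bo came from this buffer manager:

/* Map for writing (write_enable = 1), fill, then unmap. */
if (drm_intel_bo_map(bo, 1) == 0) {
	memset(bo->virtual, 0, bo->size);
	drm_intel_bo_unmap(bo);
}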
+int
+drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ /* Allow recursive mapping. Mesa may recursively map buffers with
+ * nested display loops.
+ */
+ if (!bo_gem->mapped) {
+
+ assert(bo->virtual == NULL);
+
+ DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+
+ if (bo_gem->virtual == NULL) {
+ struct drm_i915_gem_mmap_gtt mmap_arg;
+
+ memset(&mmap_arg, 0, sizeof(mmap_arg));
+ mmap_arg.handle = bo_gem->gem_handle;
+
+ /* Get the fake offset back... */
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
+ &mmap_arg);
+ if (ret != 0) {
+ fprintf(stderr,
+ "%s:%d: Error preparing buffer map %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name,
+ strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return ret;
+ }
+
+ /* and mmap it */
+ bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr_gem->fd,
+ mmap_arg.offset);
+ if (bo_gem->virtual == MAP_FAILED) {
+ fprintf(stderr,
+ "%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name,
+ strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return errno;
+ }
+ }
+
+ bo->virtual = bo_gem->virtual;
+ bo_gem->mapped = 1;
+ DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+ bo_gem->virtual);
+ }
+
+ /* Now move it to the GTT domain so that the CPU caches are flushed */
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret != 0) {
+ fprintf (stderr, "%s:%d: Error setting domain %d: %s\n",
+ __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
+ }
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return 0;
+}
+
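The GTT path added here mmaps the fake offset returned by DRM_IOCTL_I915_GEM_MMAP_GTT, so CPU accesses go through the aperture and any tiling fence, giving a linear view of tiled storage. A usage sketch, assuming the shared unmap path is the correct pairing at this point in the API (no separate unmap_gtt exists in this diff) and that pixels/pixels_size are caller-provided:

/* Write a tiled buffer through the aperture; the hardware detiles. */
if (drm_intel_gem_bo_map_gtt(bo) == 0) {
	memcpy(bo->virtual, pixels, pixels_size);
	drm_intel_bo_unmap(bo);
}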
static int
-dri_gem_bo_unmap(dri_bo *bo)
+drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_sw_finish sw_finish;
int ret;
@@ -570,11 +687,11 @@ dri_gem_bo_unmap(dri_bo *bo)
}
static int
-dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
- unsigned long size, const void *data)
+drm_intel_gem_bo_subdata (drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_pwrite pwrite;
int ret;
@@ -596,11 +713,11 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
}
static int
-dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
- unsigned long size, void *data)
+drm_intel_gem_bo_get_subdata (drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_pread pread;
int ret;
@@ -622,17 +739,19 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
}
static void
-dri_gem_bo_wait_rendering(dri_bo *bo)
+drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_set_domain set_domain;
int ret;
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = 0;
- ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+ do {
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+ } while (ret == -1 && errno == EINTR);
if (ret != 0) {
fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
__FILE__, __LINE__,
@@ -642,9 +761,9 @@ dri_gem_bo_wait_rendering(dri_bo *bo)
}
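The retry loop added above exists because a signal arriving while set_domain blocks on the GPU makes the ioctl fail with EINTR; restarting it is the standard idiom, and the same loop appears in drm_intel_gem_bo_map_gtt() above. Its shape, written out as a hypothetical stand-alone helper:

/* Hypothetical helper expressing the restart-on-EINTR idiom. */
static int
intel_ioctl_retry(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && errno == EINTR);

	return ret;
}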
static void
-dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
+drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
int i;
free(bufmgr_gem->exec_objects);
@@ -653,9 +772,9 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
pthread_mutex_destroy(&bufmgr_gem->lock);
/* Free any cached buffer objects we were going to reuse */
- for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
- struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
- dri_bo_gem *bo_gem;
+ for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+ struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
+ drm_intel_bo_gem *bo_gem;
while ((bo_gem = bucket->head) != NULL) {
bucket->head = bo_gem->next;
@@ -663,7 +782,7 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
bucket->tail = &bucket->head;
bucket->num_entries--;
- dri_gem_bo_free(&bo_gem->bo);
+ drm_intel_gem_bo_free(&bo_gem->bo);
}
}
@@ -680,18 +799,19 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
* last known offset in target_bo.
*/
static int
-dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target_bo)
+drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
pthread_mutex_lock(&bufmgr_gem->lock);
/* Create a new relocation list if needed */
if (bo_gem->relocs == NULL)
- intel_setup_reloc_list(bo);
+ drm_intel_setup_reloc_list(bo);
/* Check overflow */
assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
@@ -700,8 +820,17 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
assert (offset <= bo->size - 4);
assert ((write_domain & (write_domain-1)) == 0);
+ /* Make sure that we're not adding a reloc to something whose size has
+ * already been accounted for.
+ */
+ assert(!bo_gem->used_as_reloc_target);
+ bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
+
+ /* Flag the target to disallow further relocations in it. */
+ target_bo_gem->used_as_reloc_target = 1;
+
bo_gem->relocs[bo_gem->reloc_count].offset = offset;
- bo_gem->relocs[bo_gem->reloc_count].delta = delta;
+ bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
bo_gem->relocs[bo_gem->reloc_count].target_handle =
target_bo_gem->gem_handle;
bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
@@ -709,7 +838,7 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
- dri_gem_bo_reference_locked(target_bo);
+ drm_intel_gem_bo_reference_locked(target_bo);
bo_gem->reloc_count++;
@@ -724,61 +853,61 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
* index values into the validation list.
*/
static void
-dri_gem_bo_process_reloc(dri_bo *bo)
+drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int i;
if (bo_gem->relocs == NULL)
return;
for (i = 0; i < bo_gem->reloc_count; i++) {
- dri_bo *target_bo = bo_gem->reloc_target_bo[i];
+ drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];
/* Continue walking the tree depth-first. */
- dri_gem_bo_process_reloc(target_bo);
+ drm_intel_gem_bo_process_reloc(target_bo);
/* Add the target to the validate list */
- intel_add_validate_buffer(target_bo);
+ drm_intel_add_validate_buffer(target_bo);
}
}
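Under the reordered emit_reloc signature, a caller writes the presumed address into the batch itself and then records a relocation at that byte offset; the kernel only rewrites the dword if target_bo has moved from presumed_offset by execution time. A sketch, assuming the drm_intel_bo_emit_reloc() convenience wrapper over the hook above, with batch_map and batch_used as hypothetical caller-side batch state:

/* Emit a pointer to target_bo at the current batch location. */
batch_map[batch_used / 4] = target_bo->offset + target_offset; /* presumed */
drm_intel_bo_emit_reloc(batch_bo, batch_used,
			target_bo, target_offset,
			I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
batch_used += 4;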
static void
-intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
+drm_intel_update_buffer_offsets (drm_intel_bufmgr_gem *bufmgr_gem)
{
int i;
for (i = 0; i < bufmgr_gem->exec_count; i++) {
- dri_bo *bo = bufmgr_gem->exec_bos[i];
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
/* Update the buffer offset */
if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset,
- bufmgr_gem->exec_objects[i].offset);
+ (unsigned long long)bufmgr_gem->exec_objects[i].offset);
bo->offset = bufmgr_gem->exec_objects[i].offset;
}
}
}
static int
-dri_gem_bo_exec(dri_bo *bo, int used,
- drm_clip_rect_t *cliprects, int num_cliprects,
- int DR4)
+drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t *cliprects, int num_cliprects,
+ int DR4)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
struct drm_i915_gem_execbuffer execbuf;
int ret, i;
pthread_mutex_lock(&bufmgr_gem->lock);
/* Update indices and set up the validate list. */
- dri_gem_bo_process_reloc(bo);
+ drm_intel_gem_bo_process_reloc(bo);
/* Add the batch buffer to the validation list. There are no relocations
* pointing to it.
*/
- intel_add_validate_buffer(bo);
+ drm_intel_add_validate_buffer(bo);
execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
execbuf.buffer_count = bufmgr_gem->exec_count;
@@ -791,23 +920,23 @@ dri_gem_bo_exec(dri_bo *bo, int used,
do {
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
- } while (ret == -EAGAIN);
+ } while (ret != 0 && errno == EAGAIN);
- intel_update_buffer_offsets (bufmgr_gem);
+ drm_intel_update_buffer_offsets (bufmgr_gem);
if (bufmgr_gem->bufmgr.debug)
- dri_gem_dump_validation_list(bufmgr_gem);
+ drm_intel_gem_dump_validation_list(bufmgr_gem);
for (i = 0; i < bufmgr_gem->exec_count; i++) {
- dri_bo *bo = bufmgr_gem->exec_bos[i];
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
/* Need to call swrast on next bo_map */
bo_gem->swrast = 0;
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
- dri_gem_bo_unreference_locked(bo);
+ drm_intel_gem_bo_unreference_locked(bo);
bufmgr_gem->exec_bos[i] = NULL;
}
bufmgr_gem->exec_count = 0;
@@ -817,10 +946,10 @@ dri_gem_bo_exec(dri_bo *bo, int used,
}
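Seen from the driver, submission collapses to two calls once the batch words and relocations are in place; exec walks the reloc tree, builds the validate list, and tears it back down as above. A sketch, with batch_data and batch_used as hypothetical caller state (used counts bytes and must be padded as the hardware requires):

/* Upload the accumulated batch and hand it to the kernel. */
drm_intel_bo_subdata(batch_bo, 0, batch_used, batch_data);
drm_intel_bo_exec(batch_bo, batch_used, NULL, 0, 0);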
static int
-dri_gem_bo_pin(dri_bo *bo, uint32_t alignment)
+drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_pin pin;
int ret;
@@ -836,10 +965,10 @@ dri_gem_bo_pin(dri_bo *bo, uint32_t alignment)
}
static int
-dri_gem_bo_unpin(dri_bo *bo)
+drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_unpin unpin;
int ret;
@@ -853,15 +982,17 @@ dri_gem_bo_unpin(dri_bo *bo)
}
static int
-dri_gem_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
+drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+ uint32_t stride)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_i915_gem_set_tiling set_tiling;
int ret;
set_tiling.handle = bo_gem->gem_handle;
set_tiling.tiling_mode = *tiling_mode;
+ set_tiling.stride = stride;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
if (ret != 0) {
@@ -874,10 +1005,33 @@ dri_gem_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode)
}
static int
-dri_gem_bo_flink(dri_bo *bo, uint32_t *name)
+drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+ uint32_t *swizzle_mode)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ struct drm_i915_gem_get_tiling get_tiling;
+ int ret;
+
+ get_tiling.handle = bo_gem->gem_handle;
+
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
+ if (ret != 0) {
+ *tiling_mode = I915_TILING_NONE;
+ *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ return -errno;
+ }
+
+ *tiling_mode = get_tiling.tiling_mode;
+ *swizzle_mode = get_tiling.swizzle_mode;
+ return 0;
+}
+
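Pairing the two tiling entry points: set_tiling now carries the stride the fence register should be programmed with, and get_tiling reports both what the kernel actually granted and the bit-6 swizzle the CPU has to honour when addressing the buffer directly. A sketch, assuming public drm_intel_bo_set_tiling()/drm_intel_bo_get_tiling() wrappers over the hooks installed below:

uint32_t tiling = I915_TILING_X, swizzle;

/* Request X tiling with a 512-byte pitch; the kernel may refuse
 * and leave the mode at I915_TILING_NONE. */
drm_intel_bo_set_tiling(bo, &tiling, 512);
drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
if (swizzle != I915_BIT_6_SWIZZLE_NONE) {
	/* CPU access must apply the reported bit-6 swizzle. */
}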
+static int
+drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
- dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
struct drm_gem_flink flink;
int ret;
@@ -902,23 +1056,106 @@ dri_gem_bo_flink(dri_bo *bo, uint32_t *name)
* in flight at once.
*/
void
-intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr)
+drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
- dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
int i;
- for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
+ for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
bufmgr_gem->cache_bucket[i].max_entries = -1;
}
}
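Typical bring-up is therefore two calls: create the manager sized for the driver's batchbuffers, then opt in to reuse once the caller knows it can keep unused buffers around. A minimal sketch, assuming fd is an already-open DRM device:

drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);

if (bufmgr != NULL)
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);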
-/*
+/**
+ * Return the additional aperture space required by the tree of buffer objects
+ * rooted at bo.
+ */
+static int
+drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ int i;
+ int total = 0;
+
+ if (bo == NULL || bo_gem->included_in_check_aperture)
+ return 0;
+
+ total += bo->size;
+ bo_gem->included_in_check_aperture = 1;
+
+ for (i = 0; i < bo_gem->reloc_count; i++)
+ total += drm_intel_gem_bo_get_aperture_space(bo_gem->reloc_target_bo[i]);
+
+ return total;
+}
+
+/**
+ * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
+ * for the next drm_intel_bufmgr_check_aperture_space() call.
+ */
+static void
+drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ int i;
+
+ if (bo == NULL || !bo_gem->included_in_check_aperture)
+ return;
+
+ bo_gem->included_in_check_aperture = 0;
+
+ for (i = 0; i < bo_gem->reloc_count; i++)
+ drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->reloc_target_bo[i]);
+}
+
+/**
+ * Return -1 if the batchbuffer should be flushed before attempting to
+ * emit rendering referencing the buffers pointed to by bo_array.
+ *
+ * This is required because if we try to emit a batchbuffer with relocations
+ * to a tree of buffers that won't simultaneously fit in the aperture,
+ * the rendering will return an error at a point where the software is not
+ * prepared to recover from it.
*
+ * However, we also want to emit the batchbuffer significantly before we reach
+ * the limit, as a series of batchbuffers each of which references buffers
+ * covering almost all of the aperture means that at each emit we end up
+ * waiting to evict a buffer from the last rendering, and we get synchronous
+ * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
+ * get better parallelism.
*/
static int
-dri_gem_check_aperture_space(dri_bo **bo_array, int count)
+drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
- return 0;
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
+ unsigned int total = 0;
+ unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
+ if (bo_gem != NULL)
+ total += bo_gem->reloc_tree_size;
+ }
+
+ if (total > threshold) {
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
+
+ for (i = 0; i < count; i++)
+ drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
+ }
+
+ if (total > bufmgr_gem->gtt_size * 3 / 4) {
+ DBG("check_space: overflowed available aperture, %dkb vs %dkb\n",
+ total / 1024, (int)bufmgr_gem->gtt_size / 1024);
+ return -1;
+ } else {
+ DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024 ,
+ (int)bufmgr_gem->gtt_size / 1024);
+ return 0;
+ }
}
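The intended caller pattern is to ask this question before emitting state that would reference a new buffer, and to flush when the answer is negative, so that any failure lands at a recoverable point. A sketch, with flush_batch() standing in as a hypothetical routine that submits and resets the current batchbuffer:

drm_intel_bo *check[2] = { batch_bo, new_target_bo };

if (drm_intel_bufmgr_check_aperture_space(check, 2) != 0) {
	flush_batch();	/* submit what we have, re-emit against a fresh batch */
}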
/**
@@ -927,11 +1164,12 @@ dri_gem_check_aperture_space(dri_bo **bo_array, int count)
*
* \param fd File descriptor of the opened DRM device.
*/
-dri_bufmgr *
-intel_bufmgr_gem_init(int fd, int batch_size)
+drm_intel_bufmgr *
+drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
- dri_bufmgr_gem *bufmgr_gem;
- int i;
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ struct drm_i915_gem_get_aperture aperture;
+ int ret, i;
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
bufmgr_gem->fd = fd;
@@ -941,6 +1179,19 @@ intel_bufmgr_gem_init(int fd, int batch_size)
return NULL;
}
+ ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+
+ if (ret == 0)
+ bufmgr_gem->gtt_size = aperture.aper_available_size;
+ else {
+ fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
+ strerror(errno));
+ bufmgr_gem->gtt_size = 128 * 1024 * 1024;
+ fprintf(stderr, "Assuming %dkB available aperture size.\n"
+ "May lead to reduced performance or incorrect rendering.\n",
+ (int)bufmgr_gem->gtt_size / 1024);
+ }
+
/* Let's go with one relocation per every 2 dwords (but round down a bit
* since a power of two will mean an extra page allocation for the reloc
* buffer).
@@ -949,25 +1200,26 @@ intel_bufmgr_gem_init(int fd, int batch_size)
*/
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
- bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
- bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
- bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
- bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
- bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
- bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
- bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
- bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
- bufmgr_gem->bufmgr.bo_emit_reloc = dri_gem_bo_emit_reloc;
- bufmgr_gem->bufmgr.bo_pin = dri_gem_bo_pin;
- bufmgr_gem->bufmgr.bo_unpin = dri_gem_bo_unpin;
- bufmgr_gem->bufmgr.bo_set_tiling = dri_gem_bo_set_tiling;
- bufmgr_gem->bufmgr.bo_flink = dri_gem_bo_flink;
- bufmgr_gem->bufmgr.bo_exec = dri_gem_bo_exec;
- bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
+ bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
+ bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
+ bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
+ bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
+ bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
+ bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
+ bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
+ bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
+ bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
+ bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
+ bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
+ bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
+ bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
+ bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+ bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
bufmgr_gem->bufmgr.debug = 0;
- bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
+ bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
/* Initialize the linked lists for BO reuse cache. */
- for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
+ for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;
return &bufmgr_gem->bufmgr;
diff --git a/libdrm/intel/intel_bufmgr_priv.h b/libdrm/intel/intel_bufmgr_priv.h
index 7f39bfc2..76d31e47 100644
--- a/libdrm/intel/intel_bufmgr_priv.h
+++ b/libdrm/intel/intel_bufmgr_priv.h
@@ -39,7 +39,7 @@
*
* Contains public methods followed by private storage for the buffer manager.
*/
-struct _dri_bufmgr {
+struct _drm_intel_bufmgr {
/**
* Allocate a buffer object.
*
@@ -48,17 +48,17 @@ struct _dri_bufmgr {
* bo_map() to be used by the CPU, and validated for use using bo_validate()
* to be used from the graphics device.
*/
- dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name,
- unsigned long size, unsigned int alignment);
+ drm_intel_bo *(*bo_alloc)(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
/** Takes a reference on a buffer object */
- void (*bo_reference)(dri_bo *bo);
+ void (*bo_reference)(drm_intel_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* no references remain.
*/
- void (*bo_unreference)(dri_bo *bo);
+ void (*bo_unreference)(drm_intel_bo *bo);
/**
* Maps the buffer into userspace.
@@ -67,28 +67,28 @@ struct _dri_bufmgr {
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
- int (*bo_map)(dri_bo *buf, int write_enable);
+ int (*bo_map)(drm_intel_bo *bo, int write_enable);
/** Reduces the refcount on the userspace mapping of the buffer object. */
- int (*bo_unmap)(dri_bo *buf);
+ int (*bo_unmap)(drm_intel_bo *bo);
/**
* Write data into an object.
*
* This is an optional function, if missing,
- * dri_bo will map/memcpy/unmap.
+ * drm_intel_bo will map/memcpy/unmap.
*/
- int (*bo_subdata) (dri_bo *buf, unsigned long offset,
- unsigned long size, const void *data);
+ int (*bo_subdata)(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
/**
* Read data from an object
*
* This is an optional function, if missing,
- * dri_bo will map/memcpy/unmap.
+ * drm_intel_bo will map/memcpy/unmap.
*/
- int (*bo_get_subdata) (dri_bo *bo, unsigned long offset,
- unsigned long size, void *data);
+ int (*bo_get_subdata)(drm_intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
/**
* Waits for rendering to an object by the GPU to have completed.
@@ -96,12 +96,12 @@ struct _dri_bufmgr {
* This is not required for any access to the BO by bo_map, bo_subdata, etc.
* It is merely a way for the driver to implement glFinish.
*/
- void (*bo_wait_rendering) (dri_bo *bo);
+ void (*bo_wait_rendering)(drm_intel_bo *bo);
/**
* Tears down the buffer manager instance.
*/
- void (*destroy)(dri_bufmgr *bufmgr);
+ void (*destroy)(drm_intel_bufmgr *bufmgr);
/**
* Add relocation entry in reloc_buf, which will be updated with the
@@ -109,23 +109,23 @@ struct _dri_bufmgr {
*
* Relocations remain in place for the lifetime of the buffer object.
*
- * \param reloc_buf Buffer to write the relocation into.
+ * \param bo Buffer to write the relocation into.
+ * \param offset Byte offset within bo of the pointer to target_bo.
+ * \param target_bo Buffer whose offset should be written into the
+ * relocation entry.
+ * \param target_offset Constant value to be added to target_bo's offset in
+ * relocation entry.
* \param read_domains GEM read domains which the buffer will be read into
* by the command that this relocation is part of.
* \param write_domain GEM write domain which the buffer will be dirtied
* in by the command that this relocation is part of.
- * \param delta Constant value to be added to the relocation target's
- * offset.
- * \param offset Byte offset within batch_buf of the relocated pointer.
- * \param target Buffer whose offset should be written into the relocation
- * entry.
*/
- int (*bo_emit_reloc)(dri_bo *reloc_buf,
- uint32_t read_domains, uint32_t write_domain,
- uint32_t delta, uint32_t offset, dri_bo *target);
+ int (*bo_emit_reloc)(drm_intel_bo *bo, uint32_t offset,
+ drm_intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
/** Executes the command buffer pointed to by bo. */
- int (*bo_exec)(dri_bo *bo, int used,
+ int (*bo_exec)(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
@@ -135,29 +135,39 @@ struct _dri_bufmgr {
* \param buf Buffer to pin
* \param alignment Required alignment for aperture, in bytes
*/
- int (*bo_pin) (dri_bo *buf, uint32_t alignment);
+ int (*bo_pin)(drm_intel_bo *bo, uint32_t alignment);
/**
* Unpin a buffer from the aperture, allowing it to be removed
*
* \param buf Buffer to unpin
*/
- int (*bo_unpin) (dri_bo *buf);
+ int (*bo_unpin)(drm_intel_bo *bo);
/**
* Ask that the buffer be placed in tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
- int (*bo_set_tiling) (dri_bo *bo, uint32_t *tiling_mode);
+ int (*bo_set_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode,
+ uint32_t stride);
+ /**
+ * Get the current tiling (and resulting swizzling) mode for the bo.
+ *
+ * \param buf Buffer to get tiling mode for
+ * \param tiling_mode returned tiling mode
+ * \param swizzle_mode returned swizzling mode
+ */
+ int (*bo_get_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode,
+ uint32_t *swizzle_mode);
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
- int (*bo_flink) (dri_bo *buf, uint32_t *name);
+ int (*bo_flink)(drm_intel_bo *bo, uint32_t *name);
- int (*check_aperture_space)(dri_bo **bo_array, int count);
+ int (*check_aperture_space)(drm_intel_bo **bo_array, int count);
int debug; /**< Enables verbose debugging printouts */
};