-rw-r--r--  configure.ac                      |   4
-rw-r--r--  libdrm/Makefile.am                |   4
-rw-r--r--  libdrm/intel/Makefile.am          |   1
-rw-r--r--  libdrm/intel/intel_bufmgr_fake.c  | 112
-rw-r--r--  libdrm/intel/intel_bufmgr_gem.c   |  65
5 files changed, 155 insertions, 31 deletions
diff --git a/configure.ac b/configure.ac
index 0cf09744..92507cb5 100644
--- a/configure.ac
+++ b/configure.ac
@@ -32,6 +32,10 @@ AC_PROG_CC
 AC_HEADER_STDC
 AC_SYS_LARGEFILE
 
+PKG_CHECK_MODULES(PTHREADSTUBS, pthread-stubs)
+AC_SUBST(PTHREADSTUBS_CFLAGS)
+AC_SUBST(PTHREADSTUBS_LIBS)
+
 pkgconfigdir=${libdir}/pkgconfig
 AC_SUBST(pkgconfigdir)
 AC_ARG_ENABLE(udev, AS_HELP_STRING([--enable-udev],
diff --git a/libdrm/Makefile.am b/libdrm/Makefile.am
index 532ca138..8e1c0ee4 100644
--- a/libdrm/Makefile.am
+++ b/libdrm/Makefile.am
@@ -26,8 +26,8 @@ libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined
 
 AM_CFLAGS = -I$(top_srcdir)/shared-core
 libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \
-	xf86drmMode.c
-libdrm_la_LIBADD = intel/libdrm_intel.la
+	xf86drmMode.c libdrm_lists.h
+libdrm_la_LIBADD = intel/libdrm_intel.la @PTHREADSTUBS_LIBS@
 
 libdrmincludedir = ${includedir}
 libdrminclude_HEADERS = xf86drm.h xf86drmMode.h
diff --git a/libdrm/intel/Makefile.am b/libdrm/intel/Makefile.am
index 607c4765..92388c24 100644
--- a/libdrm/intel/Makefile.am
+++ b/libdrm/intel/Makefile.am
@@ -26,6 +26,7 @@ AM_CFLAGS = \
 	$(WARN_CFLAGS) \
 	-I$(top_srcdir)/libdrm \
 	-I$(top_srcdir)/libdrm/intel \
+	$(PTHREADSTUBS_CFLAGS) \
 	-I$(top_srcdir)/shared-core
 
 libdrm_intel_la_LTLIBRARIES = libdrm_intel.la
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
index c88e1cb0..4c281467 100644
--- a/libdrm/intel/intel_bufmgr_fake.c
+++ b/libdrm/intel/intel_bufmgr_fake.c
@@ -43,6 +43,7 @@
 #include <assert.h>
 #include <errno.h>
 #include <xf86drm.h>
+#include <pthread.h>
 #include "intel_bufmgr.h"
 #include "intel_bufmgr_priv.h"
 #include "drm.h"
@@ -112,6 +113,8 @@ struct block {
 typedef struct _bufmgr_fake {
    dri_bufmgr bufmgr;
 
+   pthread_mutex_t lock;
+
    unsigned long low_offset;
    unsigned long size;
    void *virtual;
@@ -716,10 +719,16 @@ dri_fake_bo_wait_rendering(dri_bo *bo)
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 
-   if (bo_fake->block == NULL || !bo_fake->block->fenced)
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
+   if (bo_fake->block == NULL || !bo_fake->block->fenced) {
+      pthread_mutex_unlock(&bufmgr_fake->lock);
       return;
+   }
 
    _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 /* Specifically ignore texture memory sharing.
@@ -732,6 +741,8 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    struct block *block, *tmp;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    bufmgr_fake->need_fence = 1;
    bufmgr_fake->fail = 0;
 
@@ -751,6 +762,8 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
       assert(_fence_test(bufmgr_fake, block->fence));
       set_dirty(block->bo);
    }
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 static dri_bo *
@@ -825,21 +838,29 @@ intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
 static void
 dri_fake_bo_reference(dri_bo *bo)
 {
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
    bo_fake->refcount++;
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 static void
-dri_fake_bo_unreference(dri_bo *bo)
+dri_fake_bo_reference_locked(dri_bo *bo)
+{
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   bo_fake->refcount++;
+}
+
+static void
+dri_fake_bo_unreference_locked(dri_bo *bo)
 {
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    int i;
 
-   if (!bo)
-      return;
-
    if (--bo_fake->refcount == 0) {
       assert(bo_fake->map_count == 0);
       /* No remaining references, so free it */
@@ -848,17 +869,25 @@ dri_fake_bo_unreference(dri_bo *bo)
       free_backing_store(bo);
 
       for (i = 0; i < bo_fake->nr_relocs; i++)
-         dri_bo_unreference(bo_fake->relocs[i].target_buf);
+         dri_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);
 
       DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
 
      free(bo_fake->relocs);
      free(bo);
-
-      return;
    }
 }
 
+static void
+dri_fake_bo_unreference(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+
+   pthread_mutex_lock(&bufmgr_fake->lock);
+   dri_fake_bo_unreference_locked(bo);
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
 /**
  * Set the buffer as not requiring backing store, and instead get the callback
  * invoked whenever it would be set dirty.
@@ -871,6 +900,8 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo,
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    if (bo_fake->backing_store)
       free_backing_store(bo);
 
@@ -887,6 +918,8 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo,
     */
    if (invalidate_cb != NULL)
       invalidate_cb(bo, ptr);
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 /**
@@ -894,7 +927,7 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo,
  * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
  */
 static int
-dri_fake_bo_map(dri_bo *bo, int write_enable)
+dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
 {
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
@@ -952,7 +985,20 @@ dri_fake_bo_map(dri_bo *bo, int write_enable)
 }
 
 static int
-dri_fake_bo_unmap(dri_bo *bo)
+dri_fake_bo_map(dri_bo *bo, int write_enable)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   int ret;
+
+   pthread_mutex_lock(&bufmgr_fake->lock);
+   ret = dri_fake_bo_map_locked(bo, write_enable);
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+
+   return ret;
+}
+
+static int
+dri_fake_bo_unmap_locked(dri_bo *bo)
 {
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
@@ -973,11 +1019,26 @@ dri_fake_bo_unmap(dri_bo *bo)
    return 0;
 }
 
+static int
+dri_fake_bo_unmap(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   int ret;
+
+   pthread_mutex_lock(&bufmgr_fake->lock);
+   ret = dri_fake_bo_unmap_locked(bo);
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+
+   return ret;
+}
+
 static void
 dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
 {
    struct block *block, *tmp;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    bufmgr_fake->performed_rendering = 0;
    /* okay for ever BO that is on the HW kick it off.
       seriously not afraid of the POLICE right now */
@@ -991,6 +1052,8 @@ dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
       if (!(bo_fake->flags & BM_NO_BACKING_STORE))
          bo_fake->dirty = 1;
    }
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 static int
@@ -999,9 +1062,6 @@ dri_fake_bo_validate(dri_bo *bo)
    dri_bufmgr_fake *bufmgr_fake;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 
-   /* XXX: Sanity-check whether we've already validated this one under
-    * different flags.  See drmAddValidateItem().
-    */
    bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
 
    DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
@@ -1084,6 +1144,7 @@ dri_fake_destroy(dri_bufmgr *bufmgr)
 {
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
 
+   pthread_mutex_destroy(&bufmgr_fake->lock);
    mmDestroy(bufmgr_fake->heap);
    free(bufmgr);
 }
@@ -1099,6 +1160,8 @@ dri_fake_emit_reloc(dri_bo *reloc_buf,
    dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
    int i;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    assert(reloc_buf);
    assert(target_buf);
 
@@ -1111,7 +1174,7 @@ dri_fake_emit_reloc(dri_bo *reloc_buf,
 
    assert(reloc_fake->nr_relocs <= MAX_RELOCS);
 
-   dri_bo_reference(target_buf);
+   dri_fake_bo_reference_locked(target_buf);
 
    if (!target_fake->is_static)
       reloc_fake->child_size += ALIGN(target_buf->size, target_fake->alignment);
@@ -1132,6 +1195,8 @@ dri_fake_emit_reloc(dri_bo *reloc_buf,
       }
    }
 
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+
    return 0;
 }
 
@@ -1178,7 +1243,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
          ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
          if (ret != 0) {
             if (bo->virtual != NULL)
-               dri_bo_unmap(bo);
+               dri_fake_bo_unmap_locked(bo);
             return ret;
          }
       }
@@ -1188,7 +1253,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
       reloc_data = r->target_buf->offset + r->delta;
 
       if (bo->virtual == NULL)
-         dri_bo_map(bo, 1);
+         dri_fake_bo_map_locked(bo, 1);
 
       *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
 
@@ -1197,7 +1262,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
    }
 
    if (bo->virtual != NULL)
-      dri_bo_unmap(bo);
+      dri_fake_bo_unmap_locked(bo);
 
    if (bo_fake->write_domain != 0) {
      if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
@@ -1261,6 +1326,8 @@ dri_fake_bo_exec(dri_bo *bo, int used,
    int ret;
    int retry_count = 0;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    bufmgr_fake->performed_rendering = 0;
 
    dri_fake_calculate_domains(bo);
@@ -1305,6 +1372,8 @@ dri_fake_bo_exec(dri_bo *bo, int used,
 
    dri_bo_fake_post_submit(bo);
 
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+
    return 0;
 }
 
@@ -1357,6 +1426,8 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    struct block *block, *tmp;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    bufmgr_fake->need_fence = 1;
    bufmgr_fake->fail = 0;
 
@@ -1376,6 +1447,8 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
       /* Releases the memory, and memcpys dirty contents out if necessary. */
       free_block(bufmgr_fake, block);
    }
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr, volatile unsigned int *last_dispatch)
@@ -1395,6 +1468,11 @@ intel_bufmgr_fake_init(int fd,
 
    bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
 
+   if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
+      free(bufmgr_fake);
+      return NULL;
+   }
+
    /* Initialize allocator */
    DRMINITLISTHEAD(&bufmgr_fake->fenced);
    DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 70cdca74..af58ad8f 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -44,6 +44,7 @@
 #include <string.h>
 #include <unistd.h>
 #include <assert.h>
+#include <pthread.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 
@@ -84,6 +85,8 @@ typedef struct _dri_bufmgr_gem {
 
    int max_relocs;
 
+   pthread_mutex_t lock;
+
    struct drm_i915_gem_exec_object *exec_objects;
    dri_bo **exec_bos;
    int exec_size;
@@ -133,6 +136,8 @@ struct _dri_bo_gem {
    dri_bo_gem *next;
 };
 
+static void dri_gem_bo_reference_locked(dri_bo *bo);
+
 static int
 logbase2(int n)
 {
@@ -237,7 +242,7 @@ intel_add_validate_buffer(dri_bo *bo)
    bufmgr_gem->exec_objects[index].alignment = 0;
    bufmgr_gem->exec_objects[index].offset = 0;
    bufmgr_gem->exec_bos[index] = bo;
-   dri_bo_reference(bo);
+   dri_gem_bo_reference_locked(bo);
    bufmgr_gem->exec_count++;
 }
 
@@ -285,6 +290,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
       bo_size = page_size;
    }
 
+   pthread_mutex_lock(&bufmgr_gem->lock);
    /* Get a buffer out of the cache if available */
    if (bucket != NULL && bucket->num_entries > 0) {
       struct drm_i915_gem_busy busy;
@@ -302,6 +308,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
          bucket->num_entries--;
       }
    }
+   pthread_mutex_unlock(&bufmgr_gem->lock);
 
    if (!alloc_from_cache) {
      struct drm_i915_gem_create create;
@@ -380,6 +387,17 @@ intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
 static void
 dri_gem_bo_reference(dri_bo *bo)
 {
+   dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+   dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+   pthread_mutex_lock(&bufmgr_gem->lock);
+   bo_gem->refcount++;
+   pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
+static void
+dri_gem_bo_reference_locked(dri_bo *bo)
+{
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
 
    bo_gem->refcount++;
@@ -408,14 +426,11 @@ dri_gem_bo_free(dri_bo *bo)
 }
 
 static void
-dri_gem_bo_unreference(dri_bo *bo)
+dri_gem_bo_unreference_locked(dri_bo *bo)
 {
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
 
-   if (!bo)
-      return;
-
    if (--bo_gem->refcount == 0) {
       struct dri_gem_bo_bucket *bucket;
 
@@ -424,7 +439,7 @@ dri_gem_bo_unreference(dri_bo *bo)
 
       /* Unreference all the target buffers */
       for (i = 0; i < bo_gem->reloc_count; i++)
-         dri_bo_unreference(bo_gem->reloc_target_bo[i]);
+         dri_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
       free(bo_gem->reloc_target_bo);
       free(bo_gem->relocs);
    }
@@ -452,20 +467,28 @@ dri_gem_bo_unreference(dri_bo *bo)
       } else {
          dri_gem_bo_free(bo);
       }
-
-      return;
    }
 }
 
+static void
+dri_gem_bo_unreference(dri_bo *bo)
+{
+   dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+   pthread_mutex_lock(&bufmgr_gem->lock);
+   dri_gem_bo_unreference_locked(bo);
+   pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
 static int
 dri_gem_bo_map(dri_bo *bo, int write_enable)
 {
-   dri_bufmgr_gem *bufmgr_gem;
+   dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;
 
-   bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+   pthread_mutex_lock(&bufmgr_gem->lock);
 
    /* Allow recursive mapping. Mesa may recursively map buffers with
    * nested display loops.
@@ -515,6 +538,8 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
       bo_gem->swrast = 1;
    }
 
+   pthread_mutex_unlock(&bufmgr_gem->lock);
+
    return 0;
 }
 
@@ -531,6 +556,7 @@ dri_gem_bo_unmap(dri_bo *bo)
 
    assert(bo_gem->mapped);
 
+   pthread_mutex_lock(&bufmgr_gem->lock);
    if (bo_gem->swrast) {
       sw_finish.handle = bo_gem->gem_handle;
       do {
@@ -539,6 +565,7 @@ dri_gem_bo_unmap(dri_bo *bo)
       } while (ret == -1 && errno == EINTR);
       bo_gem->swrast = 0;
    }
+   pthread_mutex_unlock(&bufmgr_gem->lock);
 
    return 0;
 }
@@ -623,6 +650,8 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
    free(bufmgr_gem->exec_objects);
    free(bufmgr_gem->exec_bos);
 
+   pthread_mutex_destroy(&bufmgr_gem->lock);
+
    /* Free any cached buffer objects we were going to reuse */
    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
       struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
@@ -658,6 +687,8 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
 
+   pthread_mutex_lock(&bufmgr_gem->lock);
+
    /* Create a new relocation list if needed */
    if (bo_gem->relocs == NULL)
       intel_setup_reloc_list(bo);
@@ -678,9 +709,12 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
    bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
 
    bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
-   dri_bo_reference(target_bo);
+   dri_gem_bo_reference_locked(target_bo);
 
    bo_gem->reloc_count++;
+
+   pthread_mutex_unlock(&bufmgr_gem->lock);
+
    return 0;
 }
 
@@ -737,6 +771,7 @@ dri_gem_bo_exec(dri_bo *bo, int used,
    struct drm_i915_gem_execbuffer execbuf;
    int ret, i;
 
+   pthread_mutex_lock(&bufmgr_gem->lock);
    /* Update indices and set up the validate list. */
    dri_gem_bo_process_reloc(bo);
 
@@ -772,10 +807,11 @@ dri_gem_bo_exec(dri_bo *bo, int used,
 
       /* Disconnect the buffer from the validate list */
       bo_gem->validate_index = -1;
-      dri_bo_unreference(bo);
+      dri_gem_bo_unreference_locked(bo);
       bufmgr_gem->exec_bos[i] = NULL;
    }
    bufmgr_gem->exec_count = 0;
+   pthread_mutex_unlock(&bufmgr_gem->lock);
 
    return 0;
 }
@@ -900,6 +936,11 @@ intel_bufmgr_gem_init(int fd, int batch_size)
    bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
    bufmgr_gem->fd = fd;
 
+   if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
+      free(bufmgr_gem);
+      return NULL;
+   }
+
    /* Let's go with one relocation per every 2 dwords (but round down a bit
    * since a power of two will mean an extra page allocation for the reloc
    * buffer).
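The patch above applies one pattern throughout: each buffer manager gains a single pthread_mutex_t, every public entry point takes it, and internal callers that already hold it are switched to new *_locked variants (dri_fake_bo_unreference_locked, dri_gem_bo_reference_locked, and so on). Because a default pthread mutex is not recursive, re-entering a public (locking) function from a path that already holds the lock, such as the relocation-walking and exec paths, would deadlock; that is why those call sites are converted to the _locked forms. Below is a minimal, self-contained sketch of the pattern; the names (manager, object, obj_reference, obj_reference_locked) are invented for illustration and are not part of libdrm:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* One lock per manager, guarding every object it manages. */
typedef struct manager {
   pthread_mutex_t lock;
} manager;

typedef struct object {
   manager *mgr;
   int refcount;
} object;

/* _locked variant: the caller must already hold mgr->lock.  Used on
 * internal paths where taking the non-recursive mutex again would
 * deadlock. */
static void
obj_reference_locked(object *obj)
{
   obj->refcount++;
}

/* Public entry point: takes the lock, then delegates. */
static void
obj_reference(object *obj)
{
   pthread_mutex_lock(&obj->mgr->lock);
   obj_reference_locked(obj);
   pthread_mutex_unlock(&obj->mgr->lock);
}

int
main(void)
{
   manager mgr;
   object obj = { &mgr, 0 };

   /* Mirror the patch: check pthread_mutex_init and bail on failure. */
   if (pthread_mutex_init(&mgr.lock, NULL) != 0)
      return EXIT_FAILURE;

   obj_reference(&obj);               /* external caller: takes the lock */

   pthread_mutex_lock(&mgr.lock);     /* internal path: lock already held */
   obj_reference_locked(&obj);
   pthread_mutex_unlock(&mgr.lock);

   printf("refcount = %d\n", obj.refcount);
   pthread_mutex_destroy(&mgr.lock);
   return 0;
}

The same reasoning explains the removal of the "if (!bo) return;" guards: once the unreference logic moves into a _locked helper that is only invoked with valid pointers from the locking wrappers, the NULL check is dead code.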