From d2d7f3069dac4bc5ddd3c8da4d3955f690274276 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 31 Jul 2008 13:13:21 +1000 Subject: drm: userspace rip out TTM API --- libdrm/intel/intel_bufmgr_fake.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'libdrm/intel') diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c index e988eb58..e7ed893f 100644 --- a/libdrm/intel/intel_bufmgr_fake.c +++ b/libdrm/intel/intel_bufmgr_fake.c @@ -649,7 +649,7 @@ intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name, bo_fake->refcount = 1; bo_fake->id = ++bufmgr_fake->buf_nr; bo_fake->name = name; - bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE; + bo_fake->flags = BM_PINNED; bo_fake->is_static = 1; DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, -- cgit v1.2.3 From e9648e9107e90c3ef38a9c9ebb95bac1297d0df5 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 12 Aug 2008 18:22:34 -0700 Subject: Export a generic dri_bo handle for use by clients We'll need something like this (either a handle field or a dri_bo_get_handle function) for kernel mode setting to get at the handles. --- libdrm/intel/intel_bufmgr_gem.c | 1 + 1 file changed, 1 insertion(+) (limited to 'libdrm/intel') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 22f8695d..02b1b252 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -311,6 +311,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create); bo_gem->gem_handle = create.handle; + bo_gem->bo.handle = bo_gem->gem_handle; if (ret != 0) { free(bo_gem); return NULL; -- cgit v1.2.3 From af2323b4b3b76070fb453531147a8956161b3718 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 30 Sep 2008 16:35:26 -0700 Subject: intel bufmgr: reinstate buffer handle tracking We need a way of getting at the underlying handle for use with mode setting. We can either export it in the dri_bo object or provide a new callback to get it. --- libdrm/intel/intel_bufmgr.h | 4 ++++ libdrm/intel/intel_bufmgr_gem.c | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'libdrm/intel') diff --git a/libdrm/intel/intel_bufmgr.h b/libdrm/intel/intel_bufmgr.h index c44d596b..67dba6a1 100644 --- a/libdrm/intel/intel_bufmgr.h +++ b/libdrm/intel/intel_bufmgr.h @@ -59,6 +59,10 @@ struct _dri_bo { /** Buffer manager context associated with this buffer object */ dri_bufmgr *bufmgr; + /** + * MM-specific handle for accessing object + */ + int handle; }; dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size, diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 4ca49d0a..70cdca74 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -316,7 +316,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create); bo_gem->gem_handle = create.handle; - //bo_gem->bo.handle = bo_gem->gem_handle; + bo_gem->bo.handle = bo_gem->gem_handle; if (ret != 0) { free(bo_gem); return NULL; -- cgit v1.2.3 From 6df7b0719fe92b718e486c2b87e2f883cfa41efa Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 12 Jun 2008 23:22:26 -0700 Subject: intel: Protect bufmgr objects with a pthread mutex. We want to be able to use the bufmgr from multiple threads for GL, and thus we need to protect the internal structures. 
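In sketch form, the locking pattern the diff below applies is roughly the following (illustrative only, not the exact libdrm code; the types come from intel_bufmgr_fake.c): each public entry point takes a per-bufmgr pthread mutex, while internal callers go through _locked helpers that assume the lock is already held, so the non-recursive mutex is never acquired twice.

    static void
    dri_fake_bo_reference_locked(dri_bo *bo)
    {
            /* Caller already holds bufmgr_fake->lock. */
            ((dri_bo_fake *)bo)->refcount++;
    }

    static void
    dri_fake_bo_reference(dri_bo *bo)
    {
            dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;

            pthread_mutex_lock(&bufmgr_fake->lock);
            dri_fake_bo_reference_locked(bo);
            pthread_mutex_unlock(&bufmgr_fake->lock);
    }
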
The pthread-stubs package is used so that programs not linked against pthreads get weak symbols to stubs and don't eat most of the cost. --- libdrm/intel/Makefile.am | 1 + libdrm/intel/intel_bufmgr_fake.c | 112 +++++++++++++++++++++++++++++++++------ libdrm/intel/intel_bufmgr_gem.c | 65 ++++++++++++++++++----- 3 files changed, 149 insertions(+), 29 deletions(-) (limited to 'libdrm/intel') diff --git a/libdrm/intel/Makefile.am b/libdrm/intel/Makefile.am index 607c4765..92388c24 100644 --- a/libdrm/intel/Makefile.am +++ b/libdrm/intel/Makefile.am @@ -26,6 +26,7 @@ AM_CFLAGS = \ $(WARN_CFLAGS) \ -I$(top_srcdir)/libdrm \ -I$(top_srcdir)/libdrm/intel \ + $(PTHREADSTUBS_CFLAGS) \ -I$(top_srcdir)/shared-core libdrm_intel_la_LTLIBRARIES = libdrm_intel.la diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c index c88e1cb0..4c281467 100644 --- a/libdrm/intel/intel_bufmgr_fake.c +++ b/libdrm/intel/intel_bufmgr_fake.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "intel_bufmgr.h" #include "intel_bufmgr_priv.h" #include "drm.h" @@ -112,6 +113,8 @@ struct block { typedef struct _bufmgr_fake { dri_bufmgr bufmgr; + pthread_mutex_t lock; + unsigned long low_offset; unsigned long size; void *virtual; @@ -716,10 +719,16 @@ dri_fake_bo_wait_rendering(dri_bo *bo) dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; dri_bo_fake *bo_fake = (dri_bo_fake *)bo; - if (bo_fake->block == NULL || !bo_fake->block->fenced) + pthread_mutex_lock(&bufmgr_fake->lock); + + if (bo_fake->block == NULL || !bo_fake->block->fenced) { + pthread_mutex_unlock(&bufmgr_fake->lock); return; + } _fence_wait_internal(bufmgr_fake, bo_fake->block->fence); + + pthread_mutex_unlock(&bufmgr_fake->lock); } /* Specifically ignore texture memory sharing. 
@@ -732,6 +741,8 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr) dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; struct block *block, *tmp; + pthread_mutex_lock(&bufmgr_fake->lock); + bufmgr_fake->need_fence = 1; bufmgr_fake->fail = 0; @@ -751,6 +762,8 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr) assert(_fence_test(bufmgr_fake, block->fence)); set_dirty(block->bo); } + + pthread_mutex_unlock(&bufmgr_fake->lock); } static dri_bo * @@ -825,21 +838,29 @@ intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name, static void dri_fake_bo_reference(dri_bo *bo) { + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + pthread_mutex_lock(&bufmgr_fake->lock); bo_fake->refcount++; + pthread_mutex_unlock(&bufmgr_fake->lock); } static void -dri_fake_bo_unreference(dri_bo *bo) +dri_fake_bo_reference_locked(dri_bo *bo) +{ + dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + + bo_fake->refcount++; +} + +static void +dri_fake_bo_unreference_locked(dri_bo *bo) { dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; dri_bo_fake *bo_fake = (dri_bo_fake *)bo; int i; - if (!bo) - return; - if (--bo_fake->refcount == 0) { assert(bo_fake->map_count == 0); /* No remaining references, so free it */ @@ -848,17 +869,25 @@ dri_fake_bo_unreference(dri_bo *bo) free_backing_store(bo); for (i = 0; i < bo_fake->nr_relocs; i++) - dri_bo_unreference(bo_fake->relocs[i].target_buf); + dri_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf); DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name); free(bo_fake->relocs); free(bo); - - return; } } +static void +dri_fake_bo_unreference(dri_bo *bo) +{ + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + + pthread_mutex_lock(&bufmgr_fake->lock); + dri_fake_bo_unreference_locked(bo); + pthread_mutex_unlock(&bufmgr_fake->lock); +} + /** * Set the buffer as not requiring backing store, and instead get the callback * invoked whenever it would be set dirty. @@ -871,6 +900,8 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo, dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + pthread_mutex_lock(&bufmgr_fake->lock); + if (bo_fake->backing_store) free_backing_store(bo); @@ -887,6 +918,8 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo, */ if (invalidate_cb != NULL) invalidate_cb(bo, ptr); + + pthread_mutex_unlock(&bufmgr_fake->lock); } /** @@ -894,7 +927,7 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo, * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary. 
*/ static int -dri_fake_bo_map(dri_bo *bo, int write_enable) +dri_fake_bo_map_locked(dri_bo *bo, int write_enable) { dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; dri_bo_fake *bo_fake = (dri_bo_fake *)bo; @@ -952,7 +985,20 @@ dri_fake_bo_map(dri_bo *bo, int write_enable) } static int -dri_fake_bo_unmap(dri_bo *bo) +dri_fake_bo_map(dri_bo *bo, int write_enable) +{ + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + int ret; + + pthread_mutex_lock(&bufmgr_fake->lock); + ret = dri_fake_bo_map_locked(bo, write_enable); + pthread_mutex_unlock(&bufmgr_fake->lock); + + return ret; +} + +static int +dri_fake_bo_unmap_locked(dri_bo *bo) { dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; dri_bo_fake *bo_fake = (dri_bo_fake *)bo; @@ -973,11 +1019,26 @@ dri_fake_bo_unmap(dri_bo *bo) return 0; } +static int +dri_fake_bo_unmap(dri_bo *bo) +{ + dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + int ret; + + pthread_mutex_lock(&bufmgr_fake->lock); + ret = dri_fake_bo_unmap_locked(bo); + pthread_mutex_unlock(&bufmgr_fake->lock); + + return ret; +} + static void dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake) { struct block *block, *tmp; + pthread_mutex_lock(&bufmgr_fake->lock); + bufmgr_fake->performed_rendering = 0; /* okay for ever BO that is on the HW kick it off. seriously not afraid of the POLICE right now */ @@ -991,6 +1052,8 @@ dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake) if (!(bo_fake->flags & BM_NO_BACKING_STORE)) bo_fake->dirty = 1; } + + pthread_mutex_unlock(&bufmgr_fake->lock); } static int @@ -999,9 +1062,6 @@ dri_fake_bo_validate(dri_bo *bo) dri_bufmgr_fake *bufmgr_fake; dri_bo_fake *bo_fake = (dri_bo_fake *)bo; - /* XXX: Sanity-check whether we've already validated this one under - * different flags. See drmAddValidateItem(). 
- */ bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, @@ -1084,6 +1144,7 @@ dri_fake_destroy(dri_bufmgr *bufmgr) { dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; + pthread_mutex_destroy(&bufmgr_fake->lock); mmDestroy(bufmgr_fake->heap); free(bufmgr); } @@ -1099,6 +1160,8 @@ dri_fake_emit_reloc(dri_bo *reloc_buf, dri_bo_fake *target_fake = (dri_bo_fake *)target_buf; int i; + pthread_mutex_lock(&bufmgr_fake->lock); + assert(reloc_buf); assert(target_buf); @@ -1111,7 +1174,7 @@ dri_fake_emit_reloc(dri_bo *reloc_buf, assert(reloc_fake->nr_relocs <= MAX_RELOCS); - dri_bo_reference(target_buf); + dri_fake_bo_reference_locked(target_buf); if (!target_fake->is_static) reloc_fake->child_size += ALIGN(target_buf->size, target_fake->alignment); @@ -1132,6 +1195,8 @@ dri_fake_emit_reloc(dri_bo *reloc_buf, } } + pthread_mutex_unlock(&bufmgr_fake->lock); + return 0; } @@ -1178,7 +1243,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo) ret = dri_fake_reloc_and_validate_buffer(r->target_buf); if (ret != 0) { if (bo->virtual != NULL) - dri_bo_unmap(bo); + dri_fake_bo_unmap_locked(bo); return ret; } } @@ -1188,7 +1253,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo) reloc_data = r->target_buf->offset + r->delta; if (bo->virtual == NULL) - dri_bo_map(bo, 1); + dri_fake_bo_map_locked(bo, 1); *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data; @@ -1197,7 +1262,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo) } if (bo->virtual != NULL) - dri_bo_unmap(bo); + dri_fake_bo_unmap_locked(bo); if (bo_fake->write_domain != 0) { if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) { @@ -1261,6 +1326,8 @@ dri_fake_bo_exec(dri_bo *bo, int used, int ret; int retry_count = 0; + pthread_mutex_lock(&bufmgr_fake->lock); + bufmgr_fake->performed_rendering = 0; dri_fake_calculate_domains(bo); @@ -1305,6 +1372,8 @@ dri_fake_bo_exec(dri_bo *bo, int used, dri_bo_fake_post_submit(bo); + pthread_mutex_unlock(&bufmgr_fake->lock); + return 0; } @@ -1357,6 +1426,8 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr) dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; struct block *block, *tmp; + pthread_mutex_lock(&bufmgr_fake->lock); + bufmgr_fake->need_fence = 1; bufmgr_fake->fail = 0; @@ -1376,6 +1447,8 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr) /* Releases the memory, and memcpys dirty contents out if necessary. 
*/ free_block(bufmgr_fake, block); } + + pthread_mutex_unlock(&bufmgr_fake->lock); } void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr, volatile unsigned int *last_dispatch) @@ -1395,6 +1468,11 @@ intel_bufmgr_fake_init(int fd, bufmgr_fake = calloc(1, sizeof(*bufmgr_fake)); + if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) { + free(bufmgr_fake); + return NULL; + } + /* Initialize allocator */ DRMINITLISTHEAD(&bufmgr_fake->fenced); DRMINITLISTHEAD(&bufmgr_fake->on_hardware); diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 70cdca74..af58ad8f 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -84,6 +85,8 @@ typedef struct _dri_bufmgr_gem { int max_relocs; + pthread_mutex_t lock; + struct drm_i915_gem_exec_object *exec_objects; dri_bo **exec_bos; int exec_size; @@ -133,6 +136,8 @@ struct _dri_bo_gem { dri_bo_gem *next; }; +static void dri_gem_bo_reference_locked(dri_bo *bo); + static int logbase2(int n) { @@ -237,7 +242,7 @@ intel_add_validate_buffer(dri_bo *bo) bufmgr_gem->exec_objects[index].alignment = 0; bufmgr_gem->exec_objects[index].offset = 0; bufmgr_gem->exec_bos[index] = bo; - dri_bo_reference(bo); + dri_gem_bo_reference_locked(bo); bufmgr_gem->exec_count++; } @@ -285,6 +290,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, bo_size = page_size; } + pthread_mutex_lock(&bufmgr_gem->lock); /* Get a buffer out of the cache if available */ if (bucket != NULL && bucket->num_entries > 0) { struct drm_i915_gem_busy busy; @@ -302,6 +308,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, bucket->num_entries--; } } + pthread_mutex_unlock(&bufmgr_gem->lock); if (!alloc_from_cache) { struct drm_i915_gem_create create; @@ -379,6 +386,17 @@ intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name, static void dri_gem_bo_reference(dri_bo *bo) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + + pthread_mutex_lock(&bufmgr_gem->lock); + bo_gem->refcount++; + pthread_mutex_unlock(&bufmgr_gem->lock); +} + +static void +dri_gem_bo_reference_locked(dri_bo *bo) { dri_bo_gem *bo_gem = (dri_bo_gem *)bo; @@ -408,14 +426,11 @@ dri_gem_bo_free(dri_bo *bo) } static void -dri_gem_bo_unreference(dri_bo *bo) +dri_gem_bo_unreference_locked(dri_bo *bo) { dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; dri_bo_gem *bo_gem = (dri_bo_gem *)bo; - if (!bo) - return; - if (--bo_gem->refcount == 0) { struct dri_gem_bo_bucket *bucket; @@ -424,7 +439,7 @@ dri_gem_bo_unreference(dri_bo *bo) /* Unreference all the target buffers */ for (i = 0; i < bo_gem->reloc_count; i++) - dri_bo_unreference(bo_gem->reloc_target_bo[i]); + dri_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]); free(bo_gem->reloc_target_bo); free(bo_gem->relocs); } @@ -452,20 +467,28 @@ dri_gem_bo_unreference(dri_bo *bo) } else { dri_gem_bo_free(bo); } - - return; } } +static void +dri_gem_bo_unreference(dri_bo *bo) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + + pthread_mutex_lock(&bufmgr_gem->lock); + dri_gem_bo_unreference_locked(bo); + pthread_mutex_unlock(&bufmgr_gem->lock); +} + static int dri_gem_bo_map(dri_bo *bo, int write_enable) { - dri_bufmgr_gem *bufmgr_gem; + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; dri_bo_gem *bo_gem = (dri_bo_gem *)bo; struct drm_i915_gem_set_domain set_domain; int ret; - bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + 
pthread_mutex_lock(&bufmgr_gem->lock); /* Allow recursive mapping. Mesa may recursively map buffers with * nested display loops. @@ -515,6 +538,8 @@ dri_gem_bo_map(dri_bo *bo, int write_enable) bo_gem->swrast = 1; } + pthread_mutex_unlock(&bufmgr_gem->lock); + return 0; } @@ -531,6 +556,7 @@ dri_gem_bo_unmap(dri_bo *bo) assert(bo_gem->mapped); + pthread_mutex_lock(&bufmgr_gem->lock); if (bo_gem->swrast) { sw_finish.handle = bo_gem->gem_handle; do { @@ -539,6 +565,7 @@ dri_gem_bo_unmap(dri_bo *bo) } while (ret == -1 && errno == EINTR); bo_gem->swrast = 0; } + pthread_mutex_unlock(&bufmgr_gem->lock); return 0; } @@ -623,6 +650,8 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr) free(bufmgr_gem->exec_objects); free(bufmgr_gem->exec_bos); + pthread_mutex_destroy(&bufmgr_gem->lock); + /* Free any cached buffer objects we were going to reuse */ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) { struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i]; @@ -658,6 +687,8 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain, dri_bo_gem *bo_gem = (dri_bo_gem *)bo; dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo; + pthread_mutex_lock(&bufmgr_gem->lock); + /* Create a new relocation list if needed */ if (bo_gem->relocs == NULL) intel_setup_reloc_list(bo); @@ -678,9 +709,12 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain, bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset; bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo; - dri_bo_reference(target_bo); + dri_gem_bo_reference_locked(target_bo); bo_gem->reloc_count++; + + pthread_mutex_unlock(&bufmgr_gem->lock); + return 0; } @@ -737,6 +771,7 @@ dri_gem_bo_exec(dri_bo *bo, int used, struct drm_i915_gem_execbuffer execbuf; int ret, i; + pthread_mutex_lock(&bufmgr_gem->lock); /* Update indices and set up the validate list. */ dri_gem_bo_process_reloc(bo); @@ -772,10 +807,11 @@ dri_gem_bo_exec(dri_bo *bo, int used, /* Disconnect the buffer from the validate list */ bo_gem->validate_index = -1; - dri_bo_unreference(bo); + dri_gem_bo_unreference_locked(bo); bufmgr_gem->exec_bos[i] = NULL; } bufmgr_gem->exec_count = 0; + pthread_mutex_unlock(&bufmgr_gem->lock); return 0; } @@ -900,6 +936,11 @@ intel_bufmgr_gem_init(int fd, int batch_size) bufmgr_gem = calloc(1, sizeof(*bufmgr_gem)); bufmgr_gem->fd = fd; + if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) { + free(bufmgr_gem); + return NULL; + } + /* Let's go with one relocation per every 2 dwords (but round down a bit * since a power of two will mean an extra page allocation for the reloc * buffer). -- cgit v1.2.3 From 4c8aeb6fc7fa9f0aa9df4fd855696afe4bf6c0b4 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 17 Oct 2008 06:40:38 +1000 Subject: link libdrm_intel properly libdrm_intel needs symbols from libdrm, so link against it. 
(cherry picked from commit d9c2f65dd8e50736a33e97a55c257ef6843e1ce7) Conflicts: libdrm/Makefile.am --- libdrm/intel/Makefile.am | 1 + 1 file changed, 1 insertion(+) (limited to 'libdrm/intel') diff --git a/libdrm/intel/Makefile.am b/libdrm/intel/Makefile.am index 92388c24..5e3dee06 100644 --- a/libdrm/intel/Makefile.am +++ b/libdrm/intel/Makefile.am @@ -32,6 +32,7 @@ AM_CFLAGS = \ libdrm_intel_la_LTLIBRARIES = libdrm_intel.la libdrm_intel_ladir = $(libdir) libdrm_intel_la_LDFLAGS = -version-number 1:0:0 -no-undefined +libdrm_intel_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@ libdrm_intel_la_SOURCES = \ intel_bufmgr.c \ -- cgit v1.2.3 From 60c1e3a09e33bfaec893c1d4780553b9b344293a Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 19 Nov 2008 10:56:48 -0800 Subject: libdrm_intel: fix merge error don't take the lock twice --- libdrm/intel/intel_bufmgr_gem.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'libdrm/intel') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 8b8cda62..64d32d38 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -809,8 +809,6 @@ drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, pthread_mutex_lock(&bufmgr_gem->lock); - pthread_mutex_lock(&bufmgr_gem->lock); - /* Create a new relocation list if needed */ if (bo_gem->relocs == NULL) drm_intel_setup_reloc_list(bo); -- cgit v1.2.3
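The handle exported by the two Jesse Barnes commits above exists so that a kernel mode-setting client can pass the buffer object's handle to the KMS ioctls. A hypothetical use, assuming the caller already has a DRM fd, an allocated dri_bo, and the framebuffer geometry (this is not actual libdrm client code; drmModeAddFB is the existing libdrm mode-setting entry point):

    #include <stdint.h>
    #include <xf86drmMode.h>
    #include "intel_bufmgr.h"

    /* Register a buffer object as a scanout framebuffer via bo->handle. */
    static int
    set_scanout_buffer(int fd, dri_bo *bo, uint32_t width, uint32_t height,
                       uint32_t pitch, uint32_t *fb_id)
    {
            /* 24-bit depth, 32 bpp: the usual XRGB8888 layout. */
            return drmModeAddFB(fd, width, height, 24, 32, pitch,
                                bo->handle, fb_id);
    }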