Diffstat (limited to 'libdrm/intel')
-rw-r--r--  libdrm/intel/Makefile.am          |   1
-rw-r--r--  libdrm/intel/intel_bufmgr_fake.c  | 112
-rw-r--r--  libdrm/intel/intel_bufmgr_gem.c   |  65
3 files changed, 149 insertions, 29 deletions
diff --git a/libdrm/intel/Makefile.am b/libdrm/intel/Makefile.am
index 607c4765..92388c24 100644
--- a/libdrm/intel/Makefile.am
+++ b/libdrm/intel/Makefile.am
@@ -26,6 +26,7 @@ AM_CFLAGS = \
 	$(WARN_CFLAGS) \
 	-I$(top_srcdir)/libdrm \
 	-I$(top_srcdir)/libdrm/intel \
+	$(PTHREADSTUBS_CFLAGS) \
 	-I$(top_srcdir)/shared-core
 
 libdrm_intel_la_LTLIBRARIES = libdrm_intel.la
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
index 22273c5b..c8f643db 100644
--- a/libdrm/intel/intel_bufmgr_fake.c
+++ b/libdrm/intel/intel_bufmgr_fake.c
@@ -43,6 +43,7 @@
 #include <assert.h>
 #include <errno.h>
 #include <xf86drm.h>
+#include <pthread.h>
 #include "intel_bufmgr.h"
 #include "intel_bufmgr_priv.h"
 #include "drm.h"
@@ -112,6 +113,8 @@ struct block {
 
 typedef struct _bufmgr_fake {
    dri_bufmgr bufmgr;
+   pthread_mutex_t lock;
+
    unsigned long low_offset;
    unsigned long size;
    void *virtual;
@@ -716,10 +719,16 @@ dri_fake_bo_wait_rendering(dri_bo *bo)
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 
-   if (bo_fake->block == NULL || !bo_fake->block->fenced)
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
+   if (bo_fake->block == NULL || !bo_fake->block->fenced) {
+      pthread_mutex_unlock(&bufmgr_fake->lock);
       return;
+   }
 
    _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 /* Specifically ignore texture memory sharing.
@@ -732,6 +741,8 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    struct block *block, *tmp;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    bufmgr_fake->need_fence = 1;
    bufmgr_fake->fail = 0;
 
@@ -751,6 +762,8 @@ intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr)
       assert(_fence_test(bufmgr_fake, block->fence));
       set_dirty(block->bo);
    }
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 static dri_bo *
@@ -825,21 +838,29 @@ intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name,
 static void
 dri_fake_bo_reference(dri_bo *bo)
 {
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
    bo_fake->refcount++;
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 static void
-dri_fake_bo_unreference(dri_bo *bo)
+dri_fake_bo_reference_locked(dri_bo *bo)
+{
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   bo_fake->refcount++;
+}
+
+static void
+dri_fake_bo_unreference_locked(dri_bo *bo)
 {
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
    int i;
 
-   if (!bo)
-      return;
-
    if (--bo_fake->refcount == 0) {
       assert(bo_fake->map_count == 0);
       /* No remaining references, so free it */
@@ -848,17 +869,25 @@ dri_fake_bo_unreference(dri_bo *bo)
       free_backing_store(bo);
 
       for (i = 0; i < bo_fake->nr_relocs; i++)
-	 dri_bo_unreference(bo_fake->relocs[i].target_buf);
+	 dri_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);
 
       DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
 
      free(bo_fake->relocs);
      free(bo);
-
-      return;
    }
 }
 
+static void
+dri_fake_bo_unreference(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+
+   pthread_mutex_lock(&bufmgr_fake->lock);
+   dri_fake_bo_unreference_locked(bo);
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+}
+
 /**
  * Set the buffer as not requiring backing store, and instead get the callback
  * invoked whenever it would be set dirty.
@@ -871,6 +900,8 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo,
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    if (bo_fake->backing_store)
       free_backing_store(bo);
 
@@ -887,6 +918,8 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo,
     */
    if (invalidate_cb != NULL)
      invalidate_cb(bo, ptr);
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 /**
@@ -894,7 +927,7 @@ void intel_bo_fake_disable_backing_store(dri_bo *bo,
  * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
  */
 static int
-dri_fake_bo_map(dri_bo *bo, int write_enable)
+dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
 {
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
@@ -965,7 +998,20 @@ dri_fake_bo_map(dri_bo *bo, int write_enable)
 }
 
 static int
-dri_fake_bo_unmap(dri_bo *bo)
+dri_fake_bo_map(dri_bo *bo, int write_enable)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   int ret;
+
+   pthread_mutex_lock(&bufmgr_fake->lock);
+   ret = dri_fake_bo_map_locked(bo, write_enable);
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+
+   return ret;
+}
+
+static int
+dri_fake_bo_unmap_locked(dri_bo *bo)
 {
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
@@ -986,11 +1032,26 @@ dri_fake_bo_unmap(dri_bo *bo)
    return 0;
 }
 
+static int
+dri_fake_bo_unmap(dri_bo *bo)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   int ret;
+
+   pthread_mutex_lock(&bufmgr_fake->lock);
+   ret = dri_fake_bo_unmap_locked(bo);
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+
+   return ret;
+}
+
 static void
 dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
 {
    struct block *block, *tmp;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    bufmgr_fake->performed_rendering = 0;
    /* okay for ever BO that is on the HW kick it off.
      seriously not afraid of the POLICE right now */
@@ -1004,6 +1065,8 @@ dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
      if (!(bo_fake->flags & BM_NO_BACKING_STORE))
         bo_fake->dirty = 1;
    }
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 static int
@@ -1012,9 +1075,6 @@ dri_fake_bo_validate(dri_bo *bo)
    dri_bufmgr_fake *bufmgr_fake;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 
-   /* XXX: Sanity-check whether we've already validated this one under
-    * different flags.  See drmAddValidateItem().
-    */
    bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
 
    DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
@@ -1097,6 +1157,7 @@ dri_fake_destroy(dri_bufmgr *bufmgr)
 {
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
 
+   pthread_mutex_destroy(&bufmgr_fake->lock);
    mmDestroy(bufmgr_fake->heap);
    free(bufmgr);
 }
@@ -1112,6 +1173,8 @@ dri_fake_emit_reloc(dri_bo *reloc_buf,
    dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
    int i;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    assert(reloc_buf);
    assert(target_buf);
 
@@ -1124,7 +1187,7 @@ dri_fake_emit_reloc(dri_bo *reloc_buf,
 
    assert(reloc_fake->nr_relocs <= MAX_RELOCS);
 
-   dri_bo_reference(target_buf);
+   dri_fake_bo_reference_locked(target_buf);
 
    if (!target_fake->is_static)
      reloc_fake->child_size += ALIGN(target_buf->size, target_fake->alignment);
@@ -1145,6 +1208,8 @@ dri_fake_emit_reloc(dri_bo *reloc_buf,
      }
    }
 
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+
    return 0;
 }
 
@@ -1190,7 +1255,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
          ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
         if (ret != 0) {
            if (bo->virtual != NULL)
-                dri_bo_unmap(bo);
+                dri_fake_bo_unmap_locked(bo);
            return ret;
         }
      }
@@ -1200,7 +1265,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
 	 reloc_data = r->target_buf->offset + r->delta;
 
 	 if (bo->virtual == NULL)
-	    dri_bo_map(bo, 1);
+	    dri_fake_bo_map_locked(bo, 1);
 
 	 *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
 
@@ -1209,7 +1274,7 @@ dri_fake_reloc_and_validate_buffer(dri_bo *bo)
    }
 
    if (bo->virtual != NULL)
-      dri_bo_unmap(bo);
+      dri_fake_bo_unmap_locked(bo);
 
    if (bo_fake->write_domain != 0) {
      if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
@@ -1272,6 +1337,8 @@ dri_fake_bo_exec(dri_bo *bo, int used,
    int ret;
    int retry_count = 0;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    bufmgr_fake->performed_rendering = 0;
 
    dri_fake_calculate_domains(bo);
@@ -1316,6 +1383,8 @@ dri_fake_bo_exec(dri_bo *bo, int used,
 
    dri_bo_fake_post_submit(bo);
 
+   pthread_mutex_unlock(&bufmgr_fake->lock);
+
    return 0;
 }
 
@@ -1368,6 +1437,8 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
    struct block *block, *tmp;
 
+   pthread_mutex_lock(&bufmgr_fake->lock);
+
    bufmgr_fake->need_fence = 1;
    bufmgr_fake->fail = 0;
 
@@ -1387,6 +1458,8 @@ intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr)
      /* Releases the memory, and memcpys dirty contents out if necessary. */
      free_block(bufmgr_fake, block);
    }
+
+   pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr,
 					 volatile unsigned int *last_dispatch)
@@ -1406,6 +1479,11 @@ intel_bufmgr_fake_init(int fd,
 
    bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
 
+   if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
+      free(bufmgr_fake);
+      return NULL;
+   }
+
    /* Initialize allocator */
    DRMINITLISTHEAD(&bufmgr_fake->fenced);
    DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index cd36cdc7..9bd44417 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -44,6 +44,7 @@
 #include <string.h>
 #include <unistd.h>
 #include <assert.h>
+#include <pthread.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 
@@ -84,6 +85,8 @@ typedef struct _dri_bufmgr_gem {
 
     int max_relocs;
 
+    pthread_mutex_t lock;
+
     struct drm_i915_gem_exec_object *exec_objects;
     dri_bo **exec_bos;
     int exec_size;
@@ -133,6 +136,8 @@ struct _dri_bo_gem {
     dri_bo_gem *next;
 };
 
+static void dri_gem_bo_reference_locked(dri_bo *bo);
+
 static int
 logbase2(int n)
 {
@@ -237,7 +242,7 @@ intel_add_validate_buffer(dri_bo *bo)
     bufmgr_gem->exec_objects[index].alignment = 0;
     bufmgr_gem->exec_objects[index].offset = 0;
     bufmgr_gem->exec_bos[index] = bo;
-    dri_bo_reference(bo);
+    dri_gem_bo_reference_locked(bo);
     bufmgr_gem->exec_count++;
 }
 
@@ -285,6 +290,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
 	    bo_size = page_size;
     }
 
+    pthread_mutex_lock(&bufmgr_gem->lock);
     /* Get a buffer out of the cache if available */
     if (bucket != NULL && bucket->num_entries > 0) {
 	struct drm_i915_gem_busy busy;
@@ -302,6 +308,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
 	    bucket->num_entries--;
 	}
     }
+    pthread_mutex_unlock(&bufmgr_gem->lock);
 
     if (!alloc_from_cache) {
 	struct drm_i915_gem_create create;
@@ -379,6 +386,17 @@ intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
 static void
 dri_gem_bo_reference(dri_bo *bo)
 {
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+    pthread_mutex_lock(&bufmgr_gem->lock);
+    bo_gem->refcount++;
+    pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
+static void
+dri_gem_bo_reference_locked(dri_bo *bo)
+{
     dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
 
     bo_gem->refcount++;
@@ -407,14 +425,11 @@ dri_gem_bo_free(dri_bo *bo)
 }
 
 static void
-dri_gem_bo_unreference(dri_bo *bo)
+dri_gem_bo_unreference_locked(dri_bo *bo)
 {
     dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
     dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
 
-    if (!bo)
-	return;
-
     if (--bo_gem->refcount == 0) {
 	struct dri_gem_bo_bucket *bucket;
 
@@ -423,7 +438,7 @@ dri_gem_bo_unreference(dri_bo *bo)
 
 	    /* Unreference all the target buffers */
 	    for (i = 0; i < bo_gem->reloc_count; i++)
-		 dri_bo_unreference(bo_gem->reloc_target_bo[i]);
+		 dri_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
 	    free(bo_gem->reloc_target_bo);
 	    free(bo_gem->relocs);
 	}
@@ -451,20 +466,28 @@ dri_gem_bo_unreference(dri_bo *bo)
 	} else {
 	    dri_gem_bo_free(bo);
 	}
-
-	return;
     }
 }
 
+static void
+dri_gem_bo_unreference(dri_bo *bo)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+    pthread_mutex_lock(&bufmgr_gem->lock);
+    dri_gem_bo_unreference_locked(bo);
+    pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
 static int
 dri_gem_bo_map(dri_bo *bo, int write_enable)
 {
-    dri_bufmgr_gem *bufmgr_gem;
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
     dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
     struct drm_i915_gem_set_domain set_domain;
     int ret;
 
-    bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+    pthread_mutex_lock(&bufmgr_gem->lock);
 
     /* Allow recursive mapping. Mesa may recursively map buffers with
      * nested display loops.
@@ -514,6 +537,8 @@ dri_gem_bo_map(dri_bo *bo, int write_enable)
 	bo_gem->swrast = 1;
     }
 
+    pthread_mutex_unlock(&bufmgr_gem->lock);
+
     return 0;
 }
 
@@ -530,6 +555,7 @@ dri_gem_bo_unmap(dri_bo *bo)
 
     assert(bo_gem->mapped);
 
+    pthread_mutex_lock(&bufmgr_gem->lock);
     if (bo_gem->swrast) {
 	sw_finish.handle = bo_gem->gem_handle;
 	do {
@@ -538,6 +564,7 @@ dri_gem_bo_unmap(dri_bo *bo)
 	} while (ret == -1 && errno == EINTR);
 	bo_gem->swrast = 0;
     }
+    pthread_mutex_unlock(&bufmgr_gem->lock);
     return 0;
 }
 
@@ -622,6 +649,8 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
     free(bufmgr_gem->exec_objects);
     free(bufmgr_gem->exec_bos);
 
+    pthread_mutex_destroy(&bufmgr_gem->lock);
+
     /* Free any cached buffer objects we were going to reuse */
     for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
 	struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
@@ -657,6 +686,8 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
     dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
     dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
 
+    pthread_mutex_lock(&bufmgr_gem->lock);
+
     /* Create a new relocation list if needed */
     if (bo_gem->relocs == NULL)
 	intel_setup_reloc_list(bo);
@@ -677,9 +708,12 @@ dri_gem_bo_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
     bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
 
     bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
-    dri_bo_reference(target_bo);
+    dri_gem_bo_reference_locked(target_bo);
 
     bo_gem->reloc_count++;
+
+    pthread_mutex_unlock(&bufmgr_gem->lock);
+
     return 0;
 }
 
@@ -736,6 +770,7 @@ dri_gem_bo_exec(dri_bo *bo, int used,
     struct drm_i915_gem_execbuffer execbuf;
     int ret, i;
 
+    pthread_mutex_lock(&bufmgr_gem->lock);
     /* Update indices and set up the validate list. */
     dri_gem_bo_process_reloc(bo);
 
@@ -771,10 +806,11 @@ dri_gem_bo_exec(dri_bo *bo, int used,
 
 	/* Disconnect the buffer from the validate list */
 	bo_gem->validate_index = -1;
-	dri_bo_unreference(bo);
+	dri_gem_bo_unreference_locked(bo);
 	bufmgr_gem->exec_bos[i] = NULL;
     }
     bufmgr_gem->exec_count = 0;
+    pthread_mutex_unlock(&bufmgr_gem->lock);
 
     return 0;
 }
 
@@ -899,6 +935,11 @@ intel_bufmgr_gem_init(int fd, int batch_size)
     bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
     bufmgr_gem->fd = fd;
 
+    if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
+      free(bufmgr_gem);
+      return NULL;
+   }
+
     /* Let's go with one relocation per every 2 dwords (but round down a bit
      * since a power of two will mean an extra page allocation for the reloc
      * buffer).
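
A note on the locking convention this patch introduces: each public entry point (dri_gem_bo_unreference(), dri_fake_bo_map(), and so on) takes the per-bufmgr mutex and delegates to a *_locked variant, while internal paths that already hold the lock (relocation emission, validation, the exec path) call the *_locked variants directly. Because the mutex is initialized with default, non-recursive attributes, calling a public entry point from inside another would self-deadlock. Below is a minimal standalone sketch of that convention, with hypothetical obj/obj_manager types standing in for the libdrm structures (not the libdrm API itself):

#include <pthread.h>
#include <stdlib.h>

struct obj_manager {                /* stand-in for dri_bufmgr_gem */
    pthread_mutex_t lock;           /* guards refcounts and shared lists */
};

struct obj {                        /* stand-in for dri_bo_gem */
    struct obj_manager *mgr;
    int refcount;
};

/* Internal variant: the caller must already hold mgr->lock
 * (compare dri_gem_bo_unreference_locked() above). */
static void obj_unreference_locked(struct obj *o)
{
    if (--o->refcount == 0)
        free(o);
}

/* Public variant: take the lock, then delegate.  The manager pointer is
 * saved first because the object may be freed under the lock. */
static void obj_unreference(struct obj *o)
{
    struct obj_manager *mgr = o->mgr;

    pthread_mutex_lock(&mgr->lock);
    obj_unreference_locked(o);
    pthread_mutex_unlock(&mgr->lock);
}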
