diff options
Diffstat (limited to 'libdrm')
| -rw-r--r-- | libdrm/Makefile.am | 8 | ||||
| -rw-r--r-- | libdrm/dri_bufmgr.c | 141 | ||||
| -rw-r--r-- | libdrm/dri_bufmgr.h | 178 | ||||
| -rw-r--r-- | libdrm/intel/Makefile.am | 39 | ||||
| -rw-r--r-- | libdrm/intel/intel_bufmgr.h | 130 | ||||
| -rw-r--r-- | libdrm/intel/intel_bufmgr_fake.c | 1232 | ||||
| -rw-r--r-- | libdrm/intel/intel_bufmgr_gem.c | 993 | ||||
| -rw-r--r-- | libdrm/intel/mm.c | 281 | ||||
| -rw-r--r-- | libdrm/intel/mm.h | 96 | ||||
| -rw-r--r-- | libdrm/libdrm_lists.h | 87 | ||||
| -rw-r--r-- | libdrm/xf86drm.c | 715 | ||||
| -rw-r--r-- | libdrm/xf86drm.h | 3 | ||||
| -rw-r--r-- | libdrm/xf86mm.h | 12 | 
13 files changed, 3273 insertions, 642 deletions
| diff --git a/libdrm/Makefile.am b/libdrm/Makefile.am index 24c32038..c6d66732 100644 --- a/libdrm/Makefile.am +++ b/libdrm/Makefile.am @@ -18,14 +18,18 @@  #  IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN  #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +SUBDIRS = intel +  libdrm_la_LTLIBRARIES = libdrm.la  libdrm_ladir = $(libdir)  libdrm_la_LDFLAGS = -version-number 2:3:0 -no-undefined  AM_CFLAGS = -I$(top_srcdir)/shared-core -libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c xf86drmMode.c +libdrm_la_SOURCES = xf86drm.c xf86drmHash.c xf86drmRandom.c xf86drmSL.c \ +	xf86drmMode.c dri_bufmgr.c libdrm_lists.h +libdrm_la_LIBADD = intel/libdrm_intel.la  libdrmincludedir = ${includedir} -libdrminclude_HEADERS = xf86drm.h xf86mm.h xf86drmMode.h +libdrminclude_HEADERS = xf86drm.h xf86drmMode.h dri_bufmgr.h  EXTRA_DIST = ChangeLog TODO diff --git a/libdrm/dri_bufmgr.c b/libdrm/dri_bufmgr.c new file mode 100644 index 00000000..a6eda3bd --- /dev/null +++ b/libdrm/dri_bufmgr.c @@ -0,0 +1,141 @@ +/* + * Copyright © 2007 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + *    Eric Anholt <eric@anholt.net> + * + */ + +#include <string.h> +#include <stdlib.h> +#include <assert.h> +#include "dri_bufmgr.h" + +/** @file dri_bufmgr.c + * + * Convenience functions for buffer management methods. 
+ */ + +dri_bo * +dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size, +	     unsigned int alignment) +{ +   return bufmgr->bo_alloc(bufmgr, name, size, alignment); +} + +void +dri_bo_reference(dri_bo *bo) +{ +   bo->bufmgr->bo_reference(bo); +} + +void +dri_bo_unreference(dri_bo *bo) +{ +   if (bo == NULL) +      return; + +   bo->bufmgr->bo_unreference(bo); +} + +int +dri_bo_map(dri_bo *buf, int write_enable) +{ +   return buf->bufmgr->bo_map(buf, write_enable); +} + +int +dri_bo_unmap(dri_bo *buf) +{ +   return buf->bufmgr->bo_unmap(buf); +} + +int +dri_bo_subdata(dri_bo *bo, unsigned long offset, +	       unsigned long size, const void *data) +{ +   int ret; +   if (bo->bufmgr->bo_subdata) +      return bo->bufmgr->bo_subdata(bo, offset, size, data); +   if (size == 0 || data == NULL) +      return 0; + +   ret = dri_bo_map(bo, 1); +   if (ret) +       return ret; +   memcpy((unsigned char *)bo->virtual + offset, data, size); +   dri_bo_unmap(bo); +   return 0; +} + +int +dri_bo_get_subdata(dri_bo *bo, unsigned long offset, +		   unsigned long size, void *data) +{ +   int ret; +   if (bo->bufmgr->bo_subdata) +      return bo->bufmgr->bo_get_subdata(bo, offset, size, data); + +   if (size == 0 || data == NULL) +      return 0; + +   ret = dri_bo_map(bo, 0); +   if (ret) +       return ret; +   memcpy(data, (unsigned char *)bo->virtual + offset, size); +   dri_bo_unmap(bo); +   return 0; +} + +void +dri_bo_wait_rendering(dri_bo *bo) +{ +   bo->bufmgr->bo_wait_rendering(bo); +} + +void +dri_bufmgr_destroy(dri_bufmgr *bufmgr) +{ +   bufmgr->destroy(bufmgr); +} + +void *dri_process_relocs(dri_bo *batch_buf) +{ +   return batch_buf->bufmgr->process_relocs(batch_buf); +} + +void dri_post_submit(dri_bo *batch_buf) +{ +   batch_buf->bufmgr->post_submit(batch_buf); +} + +void +dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug) +{ +   bufmgr->debug = enable_debug; +} + +int +dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count) +{ +	return bo_array[0]->bufmgr->check_aperture_space(bo_array, count); +} diff --git a/libdrm/dri_bufmgr.h b/libdrm/dri_bufmgr.h new file mode 100644 index 00000000..2005bdc6 --- /dev/null +++ b/libdrm/dri_bufmgr.h @@ -0,0 +1,178 @@ +/************************************************************************** + *  + * Copyright © 2007 Intel Corporation + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + *  + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + *  + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + *  + *  + **************************************************************************/ +/* + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> + *          Keith Whitwell <keithw-at-tungstengraphics-dot-com> + *	    Eric Anholt <eric@anholt.net> + */ + +#ifndef _DRI_BUFMGR_H_ +#define _DRI_BUFMGR_H_ +#include <xf86drm.h> + +typedef struct _dri_bufmgr dri_bufmgr; +typedef struct _dri_bo dri_bo; + +struct _dri_bo { +   /** +    * Size in bytes of the buffer object. +    * +    * The size may be larger than the size originally requested for the +    * allocation, such as being aligned to page size. +    */ +   unsigned long size; +   /** +    * Card virtual address (offset from the beginning of the aperture) for the +    * object.  Only valid while validated. +    */ +   unsigned long offset; +   /** +    * Virtual address for accessing the buffer data.  Only valid while mapped. +    */ +   void *virtual; +   /** Buffer manager context associated with this buffer object */ +   dri_bufmgr *bufmgr; +   /** +    * MM-specific handle for accessing object +    */ +   int handle; +}; + +/** + * Context for a buffer manager instance. + * + * Contains public methods followed by private storage for the buffer manager. + */ +struct _dri_bufmgr { +   /** +    * Allocate a buffer object. +    * +    * Buffer objects are not necessarily initially mapped into CPU virtual +    * address space or graphics device aperture.  They must be mapped using +    * bo_map() to be used by the CPU, and validated for use using bo_validate() +    * to be used from the graphics device. +    */ +   dri_bo *(*bo_alloc)(dri_bufmgr *bufmgr_ctx, const char *name, +		       unsigned long size, unsigned int alignment); + +   /** Takes a reference on a buffer object */ +   void (*bo_reference)(dri_bo *bo); + +   /** +    * Releases a reference on a buffer object, freeing the data if +    * rerefences remain. +    */ +   void (*bo_unreference)(dri_bo *bo); + +   /** +    * Maps the buffer into userspace. +    * +    * This function will block waiting for any existing execution on the +    * buffer to complete, first.  The resulting mapping is available at +    * buf->virtual. +    */ +   int (*bo_map)(dri_bo *buf, int write_enable); + +   /** Reduces the refcount on the userspace mapping of the buffer object. */ +   int (*bo_unmap)(dri_bo *buf); + +   /** +    * Write data into an object. +    * +    * This is an optional function, if missing, +    * dri_bo will map/memcpy/unmap. +    */ +   int (*bo_subdata) (dri_bo *buf, unsigned long offset, +		      unsigned long size, const void *data); + +   /** +    * Read data from an object +    * +    * This is an optional function, if missing, +    * dri_bo will map/memcpy/unmap. +    */ +   int (*bo_get_subdata) (dri_bo *bo, unsigned long offset, +			  unsigned long size, void *data); + +   /** +    * Waits for rendering to an object by the GPU to have completed. +    * +    * This is not required for any access to the BO by bo_map, bo_subdata, etc. +    * It is merely a way for the driver to implement glFinish. +    */ +   void (*bo_wait_rendering) (dri_bo *bo); + +   /** +    * Tears down the buffer manager instance. +    */ +   void (*destroy)(dri_bufmgr *bufmgr); + +   /** +    * Processes the relocations, either in userland or by converting the list +    * for use in batchbuffer submission. 
+    * +    * Kernel-based implementations will return a pointer to the arguments +    * to be handed with batchbuffer submission to the kernel.  The userland +    * implementation performs the buffer validation and emits relocations +    * into them the appopriate order. +    * +    * \param batch_buf buffer at the root of the tree of relocations +    * \return argument to be completed and passed to the execbuffers ioctl +    *   (if any). +    */ +   void *(*process_relocs)(dri_bo *batch_buf); + +   void (*post_submit)(dri_bo *batch_buf); + +   int (*check_aperture_space)(dri_bo **bo_array, int count); +   int debug; /**< Enables verbose debugging printouts */ +}; + +dri_bo *dri_bo_alloc(dri_bufmgr *bufmgr, const char *name, unsigned long size, +		     unsigned int alignment); +void dri_bo_reference(dri_bo *bo); +void dri_bo_unreference(dri_bo *bo); +int dri_bo_map(dri_bo *buf, int write_enable); +int dri_bo_unmap(dri_bo *buf); + +int dri_bo_subdata(dri_bo *bo, unsigned long offset, +		   unsigned long size, const void *data); +int dri_bo_get_subdata(dri_bo *bo, unsigned long offset, +		       unsigned long size, void *data); +void dri_bo_wait_rendering(dri_bo *bo); + +void dri_bufmgr_set_debug(dri_bufmgr *bufmgr, int enable_debug); +void dri_bufmgr_destroy(dri_bufmgr *bufmgr); + +void *dri_process_relocs(dri_bo *batch_buf); +void dri_post_process_relocs(dri_bo *batch_buf); +void dri_post_submit(dri_bo *batch_buf); +int dri_bufmgr_check_aperture_space(dri_bo **bo_array, int count); + +#endif diff --git a/libdrm/intel/Makefile.am b/libdrm/intel/Makefile.am new file mode 100644 index 00000000..31a8512a --- /dev/null +++ b/libdrm/intel/Makefile.am @@ -0,0 +1,39 @@ +# Copyright © 2008 Intel Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice (including the next +# paragraph) shall be included in all copies or substantial portions of the +# Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +# Authors: +#    Eric Anholt <eric@anholt.net> + +AM_CFLAGS = \ +	$(WARN_CFLAGS) \ +	-I$(top_srcdir)/libdrm \ +	-I$(top_srcdir)/shared-core + +noinst_LTLIBRARIES = libdrm_intel.la + +libdrm_intel_la_SOURCES = \ +	intel_bufmgr_fake.c \ +	intel_bufmgr_gem.c \ +	mm.c \ +	mm.h + +libdrm_intelincludedir = ${includedir} +libdrm_intelinclude_HEADERS = intel_bufmgr.h diff --git a/libdrm/intel/intel_bufmgr.h b/libdrm/intel/intel_bufmgr.h new file mode 100644 index 00000000..4d335210 --- /dev/null +++ b/libdrm/intel/intel_bufmgr.h @@ -0,0 +1,130 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + *    Eric Anholt <eric@anholt.net> + * + */ + +/** + * @file intel_bufmgr.h + * + * Public definitions of Intel-specific bufmgr functions. + */ + +#ifndef INTEL_BUFMGR_GEM_H +#define INTEL_BUFMGR_GEM_H + +#include "dri_bufmgr.h" + +/** + * Intel-specific bufmgr bits that follow immediately after the + * generic bufmgr structure. + */ +struct intel_bufmgr { +    /** +     * Add relocation entry in reloc_buf, which will be updated with the +     * target buffer's real offset on on command submission. +     * +     * Relocations remain in place for the lifetime of the buffer object. +     * +     * \param reloc_buf Buffer to write the relocation into. +     * \param read_domains GEM read domains which the buffer will be read into +     *	      by the command that this relocation is part of. +     * \param write_domains GEM read domains which the buffer will be dirtied +     *	      in by the command that this relocation is part of. +     * \param delta Constant value to be added to the relocation target's +     *	       offset. +     * \param offset Byte offset within batch_buf of the relocated pointer. +     * \param target Buffer whose offset should be written into the relocation +     *	     entry. 
+     */ +    int (*emit_reloc)(dri_bo *reloc_buf, +		      uint32_t read_domains, uint32_t write_domain, +		      uint32_t delta, uint32_t offset, dri_bo *target); +    /** +     * Pin a buffer to the aperture and fix the offset until unpinned +     * +     * \param buf Buffer to pin +     * \param alignment Required alignment for aperture, in bytes +     */ +    int (*pin) (dri_bo *buf, uint32_t alignment); +    /** +     * Unpin a buffer from the aperture, allowing it to be removed +     * +     * \param buf Buffer to unpin +     */ +    int (*unpin) (dri_bo *buf); +    /** +     * Ask that the buffer be placed in tiling mode +     * +     * \param buf Buffer to set tiling mode for +     * \param tiling_mode desired, and returned tiling mode +     */ +    int (*set_tiling) (dri_bo *bo, uint32_t *tiling_mode); +    /** +     * Create a visible name for a buffer which can be used by other apps +     * +     * \param buf Buffer to create a name for +     * \param name Returned name +     */ +    int (*flink) (dri_bo *buf, uint32_t *name); +}; + +/* intel_bufmgr_gem.c */ +dri_bufmgr *intel_bufmgr_gem_init(int fd, int batch_size); +dri_bo *intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name, +				      unsigned int handle); +void intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr); + +/* intel_bufmgr_fake.c */ +dri_bufmgr *intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual, +				   unsigned long size, +				   unsigned int (*fence_emit)(void *private), +				   int (*fence_wait)(void *private, +						     unsigned int cookie), +				   void *driver_priv); +dri_bo *intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name, +				   unsigned long offset, unsigned long size, +				   void *virtual); + +void intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr); +void intel_bo_fake_disable_backing_store(dri_bo *bo, +					 void (*invalidate_cb)(dri_bo *bo, +							       void *ptr), +					 void *ptr); +void intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr); + +int intel_bo_emit_reloc(dri_bo *reloc_buf, +			uint32_t read_domains, uint32_t write_domain, +			uint32_t delta, uint32_t offset, dri_bo *target_buf); + +int intel_bo_pin(dri_bo *buf, uint32_t alignment); + +int intel_bo_unpin(dri_bo *buf); + +int intel_bo_set_tiling(dri_bo *buf, uint32_t *tiling_mode); + +int intel_bo_flink(dri_bo *buf, uint32_t *name); + +#endif /* INTEL_BUFMGR_GEM_H */ + diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c new file mode 100644 index 00000000..e2dd9dc7 --- /dev/null +++ b/libdrm/intel/intel_bufmgr_fake.c @@ -0,0 +1,1232 @@ +/************************************************************************** + *  + * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + *  + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + *  + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ *  + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + *  + **************************************************************************/ + +/* Originally a fake version of the buffer manager so that we can + * prototype the changes in a driver fairly quickly, has been fleshed + * out to a fully functional interim solution. + * + * Basically wraps the old style memory management in the new + * programming interface, but is more expressive and avoids many of + * the bugs in the old texture manager. + */ + +#include <stdlib.h> +#include <string.h> +#include <assert.h> +#include "dri_bufmgr.h" +#include "intel_bufmgr.h" +#include "drm.h" +#include "i915_drm.h" +#include "mm.h" +#include "libdrm_lists.h" + +#define ALIGN(value, alignment)  ((value + alignment - 1) & ~(alignment - 1)) + +#define DBG(...) do {					\ +   if (bufmgr_fake->bufmgr.debug)			\ +      drmMsg(__VA_ARGS__);				\ +} while (0) + +/* Internal flags: + */ +#define BM_NO_BACKING_STORE			0x00000001 +#define BM_NO_FENCE_SUBDATA			0x00000002 +#define BM_PINNED				0x00000004 + +/* Wrapper around mm.c's mem_block, which understands that you must + * wait for fences to expire before memory can be freed.  This is + * specific to our use of memcpy for uploads - an upload that was + * processed through the command queue wouldn't need to care about + * fences. + */ +#define MAX_RELOCS 4096 + +struct fake_buffer_reloc +{ +   /** Buffer object that the relocation points at. */ +   dri_bo *target_buf; +   /** Offset of the relocation entry within reloc_buf. */ +   uint32_t offset; +   /** Cached value of the offset when we last performed this relocation. */ +   uint32_t last_target_offset; +   /** Value added to target_buf's offset to get the relocation entry. */ +   uint32_t delta; +   /** Cache domains the target buffer is read into. */ +   uint32_t read_domains; +   /** Cache domain the target buffer will have dirty cachelines in. */ +   uint32_t write_domain; +}; + +struct block { +   struct block *next, *prev; +   struct mem_block *mem;	/* BM_MEM_AGP */ + +   /** +    * Marks that the block is currently in the aperture and has yet to be +    * fenced. +    */ +   unsigned on_hardware:1; +   /** +    * Marks that the block is currently fenced (being used by rendering) and +    * can't be freed until @fence is passed. +    */ +   unsigned fenced:1; + +   /** Fence cookie for the block. */ +   unsigned fence; /* Split to read_fence, write_fence */ + +   dri_bo *bo; +   void *virtual; +}; + +typedef struct _bufmgr_fake { +   dri_bufmgr bufmgr; +   struct intel_bufmgr intel_bufmgr; + +   unsigned long low_offset; +   unsigned long size; +   void *virtual; + +   struct mem_block *heap; + +   unsigned buf_nr;		/* for generating ids */ + +   /** +    * List of blocks which are currently in the GART but haven't been +    * fenced yet. +    */ +   struct block on_hardware; +   /** +    * List of blocks which are in the GART and have an active fence on them. +    */ +   struct block fenced; +   /** +    * List of blocks which have an expired fence and are ready to be evicted. 
+    */ +   struct block lru; + +   unsigned int last_fence; + +   unsigned fail:1; +   unsigned need_fence:1; +   int thrashing; + +   /** +    * Driver callback to emit a fence, returning the cookie. +    * +    * Currently, this also requires that a write flush be emitted before +    * emitting the fence, but this should change. +    */ +   unsigned int (*fence_emit)(void *private); +   /** Driver callback to wait for a fence cookie to have passed. */ +   int (*fence_wait)(void *private, unsigned int fence_cookie); +   /** Driver-supplied argument to driver callbacks */ +   void *driver_priv; + +   int debug; + +   int performed_rendering; +} dri_bufmgr_fake; + +typedef struct _dri_bo_fake { +   dri_bo bo; + +   unsigned id;			/* debug only */ +   const char *name; + +   unsigned dirty:1; +   /** has the card written to this buffer - we make need to copy it back */ +   unsigned card_dirty:1; +   unsigned int refcount; +   /* Flags may consist of any of the DRM_BO flags, plus +    * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two +    * driver private flags. +    */ +   uint64_t flags; +   /** Cache domains the target buffer is read into. */ +   uint32_t read_domains; +   /** Cache domain the target buffer will have dirty cachelines in. */ +   uint32_t write_domain; + +   unsigned int alignment; +   int is_static, validated; +   unsigned int map_count; + +   /** relocation list */ +   struct fake_buffer_reloc *relocs; +   int nr_relocs; +   /** +    * Total size of the target_bos of this buffer. +    * +    * Used for estimation in check_aperture. +    */ +   unsigned int child_size; + +   struct block *block; +   void *backing_store; +   void (*invalidate_cb)(dri_bo *bo, void *ptr); +   void *invalidate_ptr; +} dri_bo_fake; + +static int clear_fenced(dri_bufmgr_fake *bufmgr_fake, +			unsigned int fence_cookie); + +#define MAXFENCE 0x7fffffff + +static int FENCE_LTE( unsigned a, unsigned b ) +{ +   if (a == b) +      return 1; + +   if (a < b && b - a < (1<<24)) +      return 1; + +   if (a > b && MAXFENCE - a + b < (1<<24)) +      return 1; + +   return 0; +} + +static unsigned int +_fence_emit_internal(dri_bufmgr_fake *bufmgr_fake) +{ +   bufmgr_fake->last_fence = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv); +   return bufmgr_fake->last_fence; +} + +static void +_fence_wait_internal(dri_bufmgr_fake *bufmgr_fake, unsigned int cookie) +{ +   int ret; + +   ret = bufmgr_fake->fence_wait(bufmgr_fake->driver_priv, cookie); +   if (ret != 0) { +      drmMsg("%s:%d: Error %d waiting for fence.\n", __FILE__, __LINE__); +      abort(); +   } +   clear_fenced(bufmgr_fake, cookie); +} + +static int +_fence_test(dri_bufmgr_fake *bufmgr_fake, unsigned fence) +{ +   /* Slight problem with wrap-around: +    */ +   return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence); +} + +/** + * Allocate a memory manager block for the buffer. + */ +static int +alloc_block(dri_bo *bo) +{ +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; +   dri_bufmgr_fake *bufmgr_fake= (dri_bufmgr_fake *)bo->bufmgr; +   struct block *block = (struct block *)calloc(sizeof *block, 1); +   unsigned int align_log2 = ffs(bo_fake->alignment) - 1; +   unsigned int sz; + +   if (!block) +      return 1; + +   sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1); + +   block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0); +   if (!block->mem) { +      free(block); +      return 0; +   } + +   DRMINITLISTHEAD(block); + +   /* Insert at head or at tail???    
+    */ +   DRMLISTADDTAIL(block, &bufmgr_fake->lru); + +   block->virtual = (uint8_t *)bufmgr_fake->virtual + +      block->mem->ofs - bufmgr_fake->low_offset; +   block->bo = bo; + +   bo_fake->block = block; + +   return 1; +} + +/* Release the card storage associated with buf: + */ +static void free_block(dri_bufmgr_fake *bufmgr_fake, struct block *block) +{ +   dri_bo_fake *bo_fake; +   DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced); + +   if (!block) +      return; + +   bo_fake = (dri_bo_fake *)block->bo; +   if (!(bo_fake->flags & BM_NO_BACKING_STORE) && (bo_fake->card_dirty == 1)) { +     memcpy(bo_fake->backing_store, block->virtual, block->bo->size); +     bo_fake->card_dirty = 1; +     bo_fake->dirty = 1; +   } + +   if (block->on_hardware) { +      block->bo = NULL; +   } +   else if (block->fenced) { +      block->bo = NULL; +   } +   else { +      DBG("    - free immediately\n"); +      DRMLISTDEL(block); + +      mmFreeMem(block->mem); +      free(block); +   } +} + +static void +alloc_backing_store(dri_bo *bo) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; +   assert(!bo_fake->backing_store); +   assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE))); + +   bo_fake->backing_store = malloc(bo->size); + +   DBG("alloc_backing - buf %d %p %d\n", bo_fake->id, bo_fake->backing_store, bo->size); +   assert(bo_fake->backing_store); +} + +static void +free_backing_store(dri_bo *bo) +{ +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + +   if (bo_fake->backing_store) { +      assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE))); +      free(bo_fake->backing_store); +      bo_fake->backing_store = NULL; +   } +} + +static void +set_dirty(dri_bo *bo) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + +   if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL) +      bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr); + +   assert(!(bo_fake->flags & BM_PINNED)); + +   DBG("set_dirty - buf %d\n", bo_fake->id); +   bo_fake->dirty = 1; +} + +static int +evict_lru(dri_bufmgr_fake *bufmgr_fake, unsigned int max_fence) +{ +   struct block *block, *tmp; + +   DBG("%s\n", __FUNCTION__); + +   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) { +      dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo; + +      if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA)) +	 continue; + +      if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence)) +	 return 0; + +      set_dirty(&bo_fake->bo); +      bo_fake->block = NULL; + +      free_block(bufmgr_fake, block); +      return 1; +   } + +   return 0; +} + +static int +evict_mru(dri_bufmgr_fake *bufmgr_fake) +{ +   struct block *block, *tmp; + +   DBG("%s\n", __FUNCTION__); + +   DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) { +      dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo; + +      if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA)) +	 continue; + +      set_dirty(&bo_fake->bo); +      bo_fake->block = NULL; + +      free_block(bufmgr_fake, block); +      return 1; +   } + +   return 0; +} + +/** + * Removes all objects from the fenced list older than the given fence. 
+ */ +static int clear_fenced(dri_bufmgr_fake *bufmgr_fake, +			unsigned int fence_cookie) +{ +   struct block *block, *tmp; +   int ret = 0; + +   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) { +      assert(block->fenced); + +      if (_fence_test(bufmgr_fake, block->fence)) { + +	 block->fenced = 0; + +	 if (!block->bo) { +	    DBG("delayed free: offset %x sz %x\n", +		block->mem->ofs, block->mem->size); +	    DRMLISTDEL(block); +	    mmFreeMem(block->mem); +	    free(block); +	 } +	 else { +	    DBG("return to lru: offset %x sz %x\n", +		block->mem->ofs, block->mem->size); +	    DRMLISTDEL(block); +	    DRMLISTADDTAIL(block, &bufmgr_fake->lru); +	 } + +	 ret = 1; +      } +      else { +	 /* Blocks are ordered by fence, so if one fails, all from +	  * here will fail also: +	  */ +	DBG("fence not passed: offset %x sz %x %d %d \n", +	    block->mem->ofs, block->mem->size, block->fence, bufmgr_fake->last_fence); +	 break; +      } +   } + +   DBG("%s: %d\n", __FUNCTION__, ret); +   return ret; +} + +static void fence_blocks(dri_bufmgr_fake *bufmgr_fake, unsigned fence) +{ +   struct block *block, *tmp; + +   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) { +      DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block, +	  block->mem->size, block->mem->ofs, block->bo, fence); +      block->fence = fence; + +      block->on_hardware = 0; +      block->fenced = 1; + +      /* Move to tail of pending list here +       */ +      DRMLISTDEL(block); +      DRMLISTADDTAIL(block, &bufmgr_fake->fenced); +   } + +   assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware)); +} + +static int evict_and_alloc_block(dri_bo *bo) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + +   assert(bo_fake->block == NULL); + +   /* Search for already free memory: +    */ +   if (alloc_block(bo)) +      return 1; + +   /* If we're not thrashing, allow lru eviction to dig deeper into +    * recently used textures.  We'll probably be thrashing soon: +    */ +   if (!bufmgr_fake->thrashing) { +      while (evict_lru(bufmgr_fake, 0)) +	 if (alloc_block(bo)) +	    return 1; +   } + +   /* Keep thrashing counter alive? +    */ +   if (bufmgr_fake->thrashing) +      bufmgr_fake->thrashing = 20; + +   /* Wait on any already pending fences - here we are waiting for any +    * freed memory that has been submitted to hardware and fenced to +    * become available: +    */ +   while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) { +      uint32_t fence = bufmgr_fake->fenced.next->fence; +      _fence_wait_internal(bufmgr_fake, fence); + +      if (alloc_block(bo)) +	 return 1; +   } + +   if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) { +      while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) { +	 uint32_t fence = bufmgr_fake->fenced.next->fence; +	 _fence_wait_internal(bufmgr_fake, fence); +      } + +      if (!bufmgr_fake->thrashing) { +	 DBG("thrashing\n"); +      } +      bufmgr_fake->thrashing = 20; + +      if (alloc_block(bo)) +	 return 1; +   } + +   while (evict_mru(bufmgr_fake)) +      if (alloc_block(bo)) +	 return 1; + +   DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size); + +   return 0; +} + +/*********************************************************************** + * Public functions + */ + +/** + * Wait for hardware idle by emitting a fence and waiting for it. 
+ */ +static void +dri_bufmgr_fake_wait_idle(dri_bufmgr_fake *bufmgr_fake) +{ +   unsigned int cookie; + +   cookie = bufmgr_fake->fence_emit(bufmgr_fake->driver_priv); +   _fence_wait_internal(bufmgr_fake, cookie); +} + +/** + * Wait for rendering to a buffer to complete. + * + * It is assumed that the bathcbuffer which performed the rendering included + * the necessary flushing. + */ +static void +dri_fake_bo_wait_rendering(dri_bo *bo) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + +   if (bo_fake->block == NULL || !bo_fake->block->fenced) +      return; + +   _fence_wait_internal(bufmgr_fake, bo_fake->block->fence); +} + +/* Specifically ignore texture memory sharing. + *  -- just evict everything + *  -- and wait for idle + */ +void +intel_bufmgr_fake_contended_lock_take(dri_bufmgr *bufmgr) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; +   struct block *block, *tmp; + +   bufmgr_fake->need_fence = 1; +   bufmgr_fake->fail = 0; + +   /* Wait for hardware idle.  We don't know where acceleration has been +    * happening, so we'll need to wait anyway before letting anything get +    * put on the card again. +    */ +   dri_bufmgr_fake_wait_idle(bufmgr_fake); + +   /* Check that we hadn't released the lock without having fenced the last +    * set of buffers. +    */ +   assert(DRMLISTEMPTY(&bufmgr_fake->fenced)); +   assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware)); + +   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) { +      assert(_fence_test(bufmgr_fake, block->fence)); +      set_dirty(block->bo); +   } +} + +static dri_bo * +dri_fake_bo_alloc(dri_bufmgr *bufmgr, const char *name, +		  unsigned long size, unsigned int alignment) +{ +   dri_bufmgr_fake *bufmgr_fake; +   dri_bo_fake *bo_fake; + +   bufmgr_fake = (dri_bufmgr_fake *)bufmgr; + +   assert(size != 0); + +   bo_fake = calloc(1, sizeof(*bo_fake)); +   if (!bo_fake) +      return NULL; + +   bo_fake->bo.size = size; +   bo_fake->bo.offset = -1; +   bo_fake->bo.virtual = NULL; +   bo_fake->bo.bufmgr = bufmgr; +   bo_fake->refcount = 1; + +   /* Alignment must be a power of two */ +   assert((alignment & (alignment - 1)) == 0); +   if (alignment == 0) +      alignment = 1; +   bo_fake->alignment = alignment; +   bo_fake->id = ++bufmgr_fake->buf_nr; +   bo_fake->name = name; +   bo_fake->flags = 0; +   bo_fake->is_static = 0; + +   DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, +       bo_fake->bo.size / 1024); + +   return &bo_fake->bo; +} + +dri_bo * +intel_bo_fake_alloc_static(dri_bufmgr *bufmgr, const char *name, +			   unsigned long offset, unsigned long size, +			   void *virtual) +{ +   dri_bufmgr_fake *bufmgr_fake; +   dri_bo_fake *bo_fake; + +   bufmgr_fake = (dri_bufmgr_fake *)bufmgr; + +   assert(size != 0); + +   bo_fake = calloc(1, sizeof(*bo_fake)); +   if (!bo_fake) +      return NULL; + +   bo_fake->bo.size = size; +   bo_fake->bo.offset = offset; +   bo_fake->bo.virtual = virtual; +   bo_fake->bo.bufmgr = bufmgr; +   bo_fake->refcount = 1; +   bo_fake->id = ++bufmgr_fake->buf_nr; +   bo_fake->name = name; +   bo_fake->flags = BM_PINNED | DRM_BO_FLAG_NO_MOVE; +   bo_fake->is_static = 1; + +   DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, +       bo_fake->bo.size / 1024); + +   return &bo_fake->bo; +} + +static void +dri_fake_bo_reference(dri_bo *bo) +{ +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + +   bo_fake->refcount++; +} + +static void 
+dri_fake_bo_unreference(dri_bo *bo) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; +   int i; + +   if (!bo) +      return; + +   if (--bo_fake->refcount == 0) { +      assert(bo_fake->map_count == 0); +      /* No remaining references, so free it */ +      if (bo_fake->block) +	 free_block(bufmgr_fake, bo_fake->block); +      free_backing_store(bo); + +      for (i = 0; i < bo_fake->nr_relocs; i++) +	 dri_bo_unreference(bo_fake->relocs[i].target_buf); + +      DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name); + +      free(bo_fake->relocs); +      free(bo); + +      return; +   } +} + +/** + * Set the buffer as not requiring backing store, and instead get the callback + * invoked whenever it would be set dirty. + */ +void intel_bo_fake_disable_backing_store(dri_bo *bo, +					 void (*invalidate_cb)(dri_bo *bo, +							       void *ptr), +					 void *ptr) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + +   if (bo_fake->backing_store) +      free_backing_store(bo); + +   bo_fake->flags |= BM_NO_BACKING_STORE; + +   DBG("disable_backing_store set buf %d dirty\n", bo_fake->id); +   bo_fake->dirty = 1; +   bo_fake->invalidate_cb = invalidate_cb; +   bo_fake->invalidate_ptr = ptr; + +   /* Note that it is invalid right from the start.  Also note +    * invalidate_cb is called with the bufmgr locked, so cannot +    * itself make bufmgr calls. +    */ +   if (invalidate_cb != NULL) +      invalidate_cb(bo, ptr); +} + +/** + * Map a buffer into bo->virtual, allocating either card memory space (If + * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary. + */ +static int +dri_fake_bo_map(dri_bo *bo, int write_enable) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + +   /* Static buffers are always mapped. */ +   if (bo_fake->is_static) +      return 0; + +   /* Allow recursive mapping.  Mesa may recursively map buffers with +    * nested display loops, and it is used internally in bufmgr_fake +    * for relocation. +    */ +   if (bo_fake->map_count++ != 0) +      return 0; + +   { +      DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, +	  bo_fake->bo.size / 1024); + +      if (bo->virtual != NULL) { +	 drmMsg("%s: already mapped\n", __FUNCTION__); +	 abort(); +      } +      else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) { + +	 if (!bo_fake->block && !evict_and_alloc_block(bo)) { +	    DBG("%s: alloc failed\n", __FUNCTION__); +	    bufmgr_fake->fail = 1; +	    return 1; +	 } +	 else { +	    assert(bo_fake->block); +	    bo_fake->dirty = 0; + +	    if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) && +		bo_fake->block->fenced) { +	       dri_fake_bo_wait_rendering(bo); +	    } + +	    bo->virtual = bo_fake->block->virtual; +	 } +      } +      else { +	 if (write_enable) +	    set_dirty(bo); + +	 if (bo_fake->backing_store == 0) +	    alloc_backing_store(bo); + +	 bo->virtual = bo_fake->backing_store; +      } +   } + +   return 0; +} + +static int +dri_fake_bo_unmap(dri_bo *bo) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + +   /* Static buffers are always mapped. 
*/ +   if (bo_fake->is_static) +      return 0; + +   assert(bo_fake->map_count != 0); +   if (--bo_fake->map_count != 0) +      return 0; + +   DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, +       bo_fake->bo.size / 1024); + +   bo->virtual = NULL; + +   return 0; +} + +static void +dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake) +{ +   struct block *block, *tmp; + +   bufmgr_fake->performed_rendering = 0; +   /* okay for ever BO that is on the HW kick it off. +      seriously not afraid of the POLICE right now */ +   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) { +      dri_bo_fake *bo_fake = (dri_bo_fake *)block->bo; + +      block->on_hardware = 0; +      free_block(bufmgr_fake, block); +      bo_fake->block = NULL; +      bo_fake->validated = 0; +      if (!(bo_fake->flags & BM_NO_BACKING_STORE)) +         bo_fake->dirty = 1; +   } +} + +static int +dri_fake_bo_validate(dri_bo *bo) +{ +   dri_bufmgr_fake *bufmgr_fake; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; + +   /* XXX: Sanity-check whether we've already validated this one under +    * different flags.  See drmAddValidateItem(). +    */ +   bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; + +   DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name, +       bo_fake->bo.size / 1024); + +   /* Sanity check: Buffers should be unmapped before being validated. +    * This is not so much of a problem for bufmgr_fake, but TTM refuses, +    * and the problem is harder to debug there. +    */ +   assert(bo_fake->map_count == 0); + +   if (bo_fake->is_static) { +      /* Add it to the needs-fence list */ +      bufmgr_fake->need_fence = 1; +      return 0; +   } + +   /* Allocate the card memory */ +   if (!bo_fake->block && !evict_and_alloc_block(bo)) { +      bufmgr_fake->fail = 1; +      DBG("Failed to validate buf %d:%s\n", bo_fake->id, bo_fake->name); +      return -1; +   } + +   assert(bo_fake->block); +   assert(bo_fake->block->bo == &bo_fake->bo); + +   bo->offset = bo_fake->block->mem->ofs; + +   /* Upload the buffer contents if necessary */ +   if (bo_fake->dirty) { +      DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id, +	  bo_fake->name, bo->size, bo_fake->block->mem->ofs); + +      assert(!(bo_fake->flags & +	       (BM_NO_BACKING_STORE|BM_PINNED))); + +      /* Actually, should be able to just wait for a fence on the memory, +       * which we would be tracking when we free it.  Waiting for idle is +       * a sufficiently large hammer for now. 
+       */ +      dri_bufmgr_fake_wait_idle(bufmgr_fake); + +      /* we may never have mapped this BO so it might not have any backing +       * store if this happens it should be rare, but 0 the card memory +       * in any case */ +      if (bo_fake->backing_store) +         memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size); +      else +         memset(bo_fake->block->virtual, 0, bo->size); + +      bo_fake->dirty = 0; +   } + +   bo_fake->block->fenced = 0; +   bo_fake->block->on_hardware = 1; +   DRMLISTDEL(bo_fake->block); +   DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware); + +   bo_fake->validated = 1; +   bufmgr_fake->need_fence = 1; + +   return 0; +} + +static void +dri_fake_fence_validated(dri_bufmgr *bufmgr) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; +   unsigned int cookie; + +   cookie = _fence_emit_internal(bufmgr_fake); +   fence_blocks(bufmgr_fake, cookie); + +   DBG("drm_fence_validated: 0x%08x cookie\n", cookie); +} + +static void +dri_fake_destroy(dri_bufmgr *bufmgr) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; + +   mmDestroy(bufmgr_fake->heap); +   free(bufmgr); +} + +static int +dri_fake_emit_reloc(dri_bo *reloc_buf, +		    uint32_t read_domains, uint32_t write_domain, +		    uint32_t delta, uint32_t offset, dri_bo *target_buf) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)reloc_buf->bufmgr; +   struct fake_buffer_reloc *r; +   dri_bo_fake *reloc_fake = (dri_bo_fake *)reloc_buf; +   dri_bo_fake *target_fake = (dri_bo_fake *)target_buf; +   int i; + +   assert(reloc_buf); +   assert(target_buf); + +   if (reloc_fake->relocs == NULL) { +      reloc_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) * +				  MAX_RELOCS); +   } + +   r = &reloc_fake->relocs[reloc_fake->nr_relocs++]; + +   assert(reloc_fake->nr_relocs <= MAX_RELOCS); + +   dri_bo_reference(target_buf); + +   if (!target_fake->is_static) +      reloc_fake->child_size += ALIGN(target_buf->size, target_fake->alignment); + +   r->target_buf = target_buf; +   r->offset = offset; +   r->last_target_offset = target_buf->offset; +   r->delta = delta; +   r->read_domains = read_domains; +   r->write_domain = write_domain; + +   if (bufmgr_fake->debug) { +      /* Check that a conflicting relocation hasn't already been emitted. */ +      for (i = 0; i < reloc_fake->nr_relocs - 1; i++) { +	 struct fake_buffer_reloc *r2 = &reloc_fake->relocs[i]; + +	 assert(r->offset != r2->offset); +      } +   } + +   return 0; +} + +/** + * Incorporates the validation flags associated with each relocation into + * the combined validation flags for the buffer on this batchbuffer submission. 
+ */ +static void +dri_fake_calculate_domains(dri_bo *bo) +{ +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; +   int i; + +   for (i = 0; i < bo_fake->nr_relocs; i++) { +      struct fake_buffer_reloc *r = &bo_fake->relocs[i]; +      dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf; + +      /* Do the same for the tree of buffers we depend on */ +      dri_fake_calculate_domains(r->target_buf); + +      target_fake->read_domains |= r->read_domains; +      if (target_fake->write_domain != 0) +	 target_fake->write_domain = r->write_domain; +   } +} + + +static int +dri_fake_reloc_and_validate_buffer(dri_bo *bo) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; +   int i, ret; + +   assert(bo_fake->map_count == 0); + +   for (i = 0; i < bo_fake->nr_relocs; i++) { +      struct fake_buffer_reloc *r = &bo_fake->relocs[i]; +      dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf; +      uint32_t reloc_data; + +      /* Validate the target buffer if that hasn't been done. */ +      if (!target_fake->validated) { +         ret = dri_fake_reloc_and_validate_buffer(r->target_buf); +         if (ret != 0) { +            if (bo->virtual != NULL) +                dri_bo_unmap(bo); +            return ret; +         } +      } + +      /* Calculate the value of the relocation entry. */ +      if (r->target_buf->offset != r->last_target_offset) { +	 reloc_data = r->target_buf->offset + r->delta; + +	 if (bo->virtual == NULL) +	    dri_bo_map(bo, 1); + +	 *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data; + +	 r->last_target_offset = r->target_buf->offset; +      } +   } + +   if (bo->virtual != NULL) +      dri_bo_unmap(bo); + +   if (bo_fake->write_domain != 0) { +      if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) { +         if (bo_fake->backing_store == 0) +            alloc_backing_store(bo); + +         bo_fake->card_dirty = 1; +      } +      bufmgr_fake->performed_rendering = 1; +   } + +   return dri_fake_bo_validate(bo); +} + +static void * +dri_fake_process_relocs(dri_bo *batch_buf) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)batch_buf->bufmgr; +   dri_bo_fake *batch_fake = (dri_bo_fake *)batch_buf; +   int ret; +   int retry_count = 0; + +   bufmgr_fake->performed_rendering = 0; + +   dri_fake_calculate_domains(batch_buf); + +   batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND; + +   /* we've ran out of RAM so blow the whole lot away and retry */ + restart: +   ret = dri_fake_reloc_and_validate_buffer(batch_buf); +   if (bufmgr_fake->fail == 1) { +      if (retry_count == 0) { +         retry_count++; +         dri_fake_kick_all(bufmgr_fake); +         bufmgr_fake->fail = 0; +         goto restart; +      } else /* dump out the memory here */ +         mmDumpMemInfo(bufmgr_fake->heap); +   } + +   assert(ret == 0); + +   return NULL; +} + +static void +dri_bo_fake_post_submit(dri_bo *bo) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr; +   dri_bo_fake *bo_fake = (dri_bo_fake *)bo; +   int i; + +   for (i = 0; i < bo_fake->nr_relocs; i++) { +      struct fake_buffer_reloc *r = &bo_fake->relocs[i]; +      dri_bo_fake *target_fake = (dri_bo_fake *)r->target_buf; + +      if (target_fake->validated) +	 dri_bo_fake_post_submit(r->target_buf); + +      DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n", +	  bo_fake->name, (uint32_t)bo->offset, r->offset, +	  target_fake->name, (uint32_t)r->target_buf->offset, r->delta); +   } + +   assert(bo_fake->map_count == 0); +   
bo_fake->validated = 0; +   bo_fake->read_domains = 0; +   bo_fake->write_domain = 0; +} + + +static void +dri_fake_post_submit(dri_bo *batch_buf) +{ +   dri_fake_fence_validated(batch_buf->bufmgr); + +   dri_bo_fake_post_submit(batch_buf); +} + +/** + * Return an error if the list of BOs will exceed the aperture size. + * + * This is a rough guess and likely to fail, as during the validate sequence we + * may place a buffer in an inopportune spot early on and then fail to fit + * a set smaller than the aperture. + */ +static int +dri_fake_check_aperture_space(dri_bo **bo_array, int count) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo_array[0]->bufmgr; +   unsigned int sz = 0; +   int i; + +   for (i = 0; i < count; i++) { +      dri_bo_fake *bo_fake = (dri_bo_fake *)bo_array[i]; + +      if (bo_fake == NULL) +	 continue; + +      if (!bo_fake->is_static) +	 sz += ALIGN(bo_array[i]->size, bo_fake->alignment); +      sz += bo_fake->child_size; +   } + +   if (sz > bufmgr_fake->size) { +      DBG("check_space: overflowed bufmgr size, %dkb vs %dkb\n", +	  sz / 1024, bufmgr_fake->size / 1024); +      return -1; +   } + +   DBG("drm_check_space: sz %dkb vs bufgr %dkb\n", sz / 1024 , +       bufmgr_fake->size / 1024); +   return 0; +} + +/** + * Evicts all buffers, waiting for fences to pass and copying contents out + * as necessary. + * + * Used by the X Server on LeaveVT, when the card memory is no longer our + * own. + */ +void +intel_bufmgr_fake_evict_all(dri_bufmgr *bufmgr) +{ +   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr; +   struct block *block, *tmp; + +   bufmgr_fake->need_fence = 1; +   bufmgr_fake->fail = 0; + +   /* Wait for hardware idle.  We don't know where acceleration has been +    * happening, so we'll need to wait anyway before letting anything get +    * put on the card again. +    */ +   dri_bufmgr_fake_wait_idle(bufmgr_fake); + +   /* Check that we hadn't released the lock without having fenced the last +    * set of buffers. +    */ +   assert(DRMLISTEMPTY(&bufmgr_fake->fenced)); +   assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware)); + +   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) { +      /* Releases the memory, and memcpys dirty contents out if necessary. 
*/ +      free_block(bufmgr_fake, block); +   } +} + +dri_bufmgr * +intel_bufmgr_fake_init(unsigned long low_offset, void *low_virtual, +		       unsigned long size, +		       unsigned int (*fence_emit)(void *private), +		       int (*fence_wait)(void *private, unsigned int cookie), +		       void *driver_priv) +{ +   dri_bufmgr_fake *bufmgr_fake; + +   bufmgr_fake = calloc(1, sizeof(*bufmgr_fake)); + +   /* Initialize allocator */ +   DRMINITLISTHEAD(&bufmgr_fake->fenced); +   DRMINITLISTHEAD(&bufmgr_fake->on_hardware); +   DRMINITLISTHEAD(&bufmgr_fake->lru); + +   bufmgr_fake->low_offset = low_offset; +   bufmgr_fake->virtual = low_virtual; +   bufmgr_fake->size = size; +   bufmgr_fake->heap = mmInit(low_offset, size); + +   /* Hook in methods */ +   bufmgr_fake->bufmgr.bo_alloc = dri_fake_bo_alloc; +   bufmgr_fake->bufmgr.bo_reference = dri_fake_bo_reference; +   bufmgr_fake->bufmgr.bo_unreference = dri_fake_bo_unreference; +   bufmgr_fake->bufmgr.bo_map = dri_fake_bo_map; +   bufmgr_fake->bufmgr.bo_unmap = dri_fake_bo_unmap; +   bufmgr_fake->bufmgr.bo_wait_rendering = dri_fake_bo_wait_rendering; +   bufmgr_fake->bufmgr.destroy = dri_fake_destroy; +   bufmgr_fake->bufmgr.process_relocs = dri_fake_process_relocs; +   bufmgr_fake->bufmgr.post_submit = dri_fake_post_submit; +   bufmgr_fake->bufmgr.check_aperture_space = dri_fake_check_aperture_space; +   bufmgr_fake->bufmgr.debug = 0; +   bufmgr_fake->intel_bufmgr.emit_reloc = dri_fake_emit_reloc; + +   bufmgr_fake->fence_emit = fence_emit; +   bufmgr_fake->fence_wait = fence_wait; +   bufmgr_fake->driver_priv = driver_priv; + +   return &bufmgr_fake->bufmgr; +} + diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c new file mode 100644 index 00000000..af20efb2 --- /dev/null +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -0,0 +1,993 @@ +/************************************************************************** + * + * Copyright © 2007 Red Hat Inc. + * Copyright © 2007 Intel Corporation + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * + **************************************************************************/ +/* + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> + *          Keith Whitwell <keithw-at-tungstengraphics-dot-com> + *	    Eric Anholt <eric@anholt.net> + *	    Dave Airlie <airlied@linux.ie> + */ + +#include <xf86drm.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <assert.h> +#include <sys/ioctl.h> +#include <sys/mman.h> + +#include "errno.h" +#include "dri_bufmgr.h" +#include "intel_bufmgr.h" +#include "string.h" + +#include "i915_drm.h" + +#define DBG(...) do {					\ +   if (bufmgr_gem->bufmgr.debug)			\ +      fprintf(stderr, __VA_ARGS__);			\ +} while (0) + +typedef struct _dri_bo_gem dri_bo_gem; + +struct dri_gem_bo_bucket { +   dri_bo_gem *head, **tail; +   /** +    * Limit on the number of entries in this bucket. +    * +    * 0 means that this caching at this bucket size is disabled. +    * -1 means that there is no limit to caching at this size. +    */ +   int max_entries; +   int num_entries; +}; + +/* Arbitrarily chosen, 16 means that the maximum size we'll cache for reuse + * is 1 << 16 pages, or 256MB. + */ +#define INTEL_GEM_BO_BUCKETS	16 +typedef struct _dri_bufmgr_gem { +    dri_bufmgr bufmgr; + +    struct intel_bufmgr intel_bufmgr; + +    int fd; + +    int max_relocs; + +    struct drm_i915_gem_exec_object *exec_objects; +    dri_bo **exec_bos; +    int exec_size; +    int exec_count; + +    /** Array of lists of cached gem objects of power-of-two sizes */ +    struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS]; + +    struct drm_i915_gem_execbuffer exec_arg; +} dri_bufmgr_gem; + +struct _dri_bo_gem { +    dri_bo bo; + +    int refcount; +    /** Boolean whether the mmap ioctl has been called for this buffer yet. */ +    int mapped; +    uint32_t gem_handle; +    const char *name; + +    /** +     * Kenel-assigned global name for this object +     */ +    unsigned int global_name; +     +    /** +     * Index of the buffer within the validation list while preparing a +     * batchbuffer execution. +     */ +    int validate_index; + +    /** +     * Boolean whether we've started swrast +     * Set when the buffer has been mapped +     * Cleared when the buffer is unmapped +     */ +    int swrast; + +    /** Array passed to the DRM containing relocation information. */ +    struct drm_i915_gem_relocation_entry *relocs; +    /** Array of bos corresponding to relocs[i].target_handle */ +    dri_bo **reloc_target_bo; +    /** Number of entries in relocs */ +    int reloc_count; +    /** Mapped address for the buffer */ +    void *virtual; + +    /** free list */ +    dri_bo_gem *next; +}; + +static int +logbase2(int n) +{ +   int i = 1; +   int log2 = 0; + +   while (n > i) { +      i *= 2; +      log2++; +   } + +   return log2; +} + +static struct dri_gem_bo_bucket * +dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size) +{ +    int i; + +    /* We only do buckets in power of two increments */ +    if ((size & (size - 1)) != 0) +	return NULL; + +    /* We should only see sizes rounded to pages. 
*/ +    assert((size % 4096) == 0); + +    /* We always allocate in units of pages */ +    i = ffs(size / 4096) - 1; +    if (i >= INTEL_GEM_BO_BUCKETS) +	return NULL; + +    return &bufmgr_gem->cache_bucket[i]; +} + + +static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem) +{ +    int i, j; + +    for (i = 0; i < bufmgr_gem->exec_count; i++) { +	dri_bo *bo = bufmgr_gem->exec_bos[i]; +	dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + +	if (bo_gem->relocs == NULL) { +	    DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name); +	    continue; +	} + +	for (j = 0; j < bo_gem->reloc_count; j++) { +	    dri_bo *target_bo = bo_gem->reloc_target_bo[j]; +	    dri_bo_gem *target_gem = (dri_bo_gem *)target_bo; + +	    DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n", +		i, +		bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset, +		target_gem->gem_handle, target_gem->name, target_bo->offset, +		bo_gem->relocs[j].delta); +	} +    } +} + +/** + * Adds the given buffer to the list of buffers to be validated (moved into the + * appropriate memory type) with the next batch submission. + * + * If a buffer is validated multiple times in a batch submission, it ends up + * with the intersection of the memory type flags and the union of the + * access flags. + */ +static void +intel_add_validate_buffer(dri_bo *bo) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    int index; + +    if (bo_gem->validate_index != -1) +	return; + +    /* Extend the array of validation entries as necessary. */ +    if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) { +	int new_size = bufmgr_gem->exec_size * 2; + +	if (new_size == 0) +	    new_size = 5; + +	bufmgr_gem->exec_objects = +	    realloc(bufmgr_gem->exec_objects, +		    sizeof(*bufmgr_gem->exec_objects) * new_size); +	bufmgr_gem->exec_bos = +	    realloc(bufmgr_gem->exec_bos, +		    sizeof(*bufmgr_gem->exec_bos) * new_size); +	bufmgr_gem->exec_size = new_size; +    } + +    index = bufmgr_gem->exec_count; +    bo_gem->validate_index = index; +    /* Fill in array entry */ +    bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle; +    bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count; +    bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs; +    bufmgr_gem->exec_objects[index].alignment = 0; +    bufmgr_gem->exec_objects[index].offset = 0; +    bufmgr_gem->exec_bos[index] = bo; +    dri_bo_reference(bo); +    bufmgr_gem->exec_count++; +} + + +#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \ +	sizeof(uint32_t)) + +static int +intel_setup_reloc_list(dri_bo *bo) +{ +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + +    bo_gem->relocs = malloc(bufmgr_gem->max_relocs * +			    sizeof(struct drm_i915_gem_relocation_entry)); +    bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *)); + +    return 0; +} + +static dri_bo * +dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, +		 unsigned long size, unsigned int alignment) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; +    dri_bo_gem *bo_gem; +    unsigned int page_size = getpagesize(); +    int ret; +    struct dri_gem_bo_bucket *bucket; +    int alloc_from_cache = 0; +    unsigned long bo_size; + +    /* Round the allocated size up to a power of two number of pages. 
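+     *
+     * For example (hypothetical request size): a 6000-byte allocation
+     * becomes 1 << logbase2(6000) = 8192 bytes, while anything smaller
+     * than a page is padded up to page_size just below.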
*/ +    bo_size = 1 << logbase2(size); +    if (bo_size < page_size) +	bo_size = page_size; +    bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size); + +    /* If we don't have caching at this size, don't actually round the +     * allocation up. +     */ +    if (bucket == NULL || bucket->max_entries == 0) { +	bo_size = size; +	if (bo_size < page_size) +	    bo_size = page_size; +    } + +    /* Get a buffer out of the cache if available */ +    if (bucket != NULL && bucket->num_entries > 0) { +	struct drm_i915_gem_busy busy; +	 +	bo_gem = bucket->head; +        busy.handle = bo_gem->gem_handle; + +        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); +        alloc_from_cache = (ret == 0 && busy.busy == 0); + +	if (alloc_from_cache) { +	    bucket->head = bo_gem->next; +	    if (bo_gem->next == NULL) +		bucket->tail = &bucket->head; +	    bucket->num_entries--; +	} +    } + +    if (!alloc_from_cache) { +	struct drm_i915_gem_create create; + +	bo_gem = calloc(1, sizeof(*bo_gem)); +	if (!bo_gem) +	    return NULL; + +	bo_gem->bo.size = bo_size; +	memset(&create, 0, sizeof(create)); +	create.size = bo_size; + +	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create); +	bo_gem->gem_handle = create.handle; +	bo_gem->bo.handle = bo_gem->gem_handle; +	if (ret != 0) { +	    free(bo_gem); +	    return NULL; +	} +	bo_gem->bo.bufmgr = bufmgr; +    } + +    bo_gem->name = name; +    bo_gem->refcount = 1; +    bo_gem->validate_index = -1; + +    DBG("bo_create: buf %d (%s) %ldb\n", +	bo_gem->gem_handle, bo_gem->name, size); + +    return &bo_gem->bo; +} + +/** + * Returns a dri_bo wrapping the given buffer object handle. + * + * This can be used when one application needs to pass a buffer object + * to another. + */ +dri_bo * +intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name, +			      unsigned int handle) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; +    dri_bo_gem *bo_gem; +    int ret; +    struct drm_gem_open open_arg; + +    bo_gem = calloc(1, sizeof(*bo_gem)); +    if (!bo_gem) +	return NULL; + +    memset(&open_arg, 0, sizeof(open_arg)); +    open_arg.name = handle; +    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg); +    if (ret != 0) { +	fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n", +	       name, handle, strerror(-ret)); +	free(bo_gem); +	return NULL; +    } +    bo_gem->bo.size = open_arg.size; +    bo_gem->bo.offset = 0; +    bo_gem->bo.virtual = NULL; +    bo_gem->bo.bufmgr = bufmgr; +    bo_gem->name = name; +    bo_gem->refcount = 1; +    bo_gem->validate_index = -1; +    bo_gem->gem_handle = open_arg.handle; + +    DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name); + +    return &bo_gem->bo; +} + +static void +dri_gem_bo_reference(dri_bo *bo) +{ +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + +    bo_gem->refcount++; +} + +static void +dri_gem_bo_free(dri_bo *bo) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    struct drm_gem_close close; +    int ret; + +    if (bo_gem->mapped) +	munmap (bo_gem->virtual, bo_gem->bo.size); + +    /* Close this object */ +    close.handle = bo_gem->gem_handle; +    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); +    if (ret != 0) { +	fprintf(stderr, +		"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", +		bo_gem->gem_handle, bo_gem->name, strerror(-ret)); +    } +    free(bo); +} + +static void +dri_gem_bo_unreference(dri_bo *bo) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem 
*)bo->bufmgr;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+    /* NULL is already filtered out by dri_bo_unreference() before this
+     * hook is called, so bo is safe to dereference here.
+     */
+    if (--bo_gem->refcount == 0) {
+	struct dri_gem_bo_bucket *bucket;
+
+	if (bo_gem->relocs != NULL) {
+	    int i;
+
+	    /* Unreference all the target buffers */
+	    for (i = 0; i < bo_gem->reloc_count; i++)
+		 dri_bo_unreference(bo_gem->reloc_target_bo[i]);
+	    free(bo_gem->reloc_target_bo);
+	    free(bo_gem->relocs);
+	}
+
+	DBG("bo_unreference final: %d (%s)\n",
+	    bo_gem->gem_handle, bo_gem->name);
+
+	bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+	/* Put the buffer into our internal cache for reuse if we can. */
+	if (bucket != NULL &&
+	    (bucket->max_entries == -1 ||
+	     (bucket->max_entries > 0 &&
+	      bucket->num_entries < bucket->max_entries)))
+	{
+	    bo_gem->name = NULL;
+	    bo_gem->validate_index = -1;
+	    bo_gem->relocs = NULL;
+	    bo_gem->reloc_target_bo = NULL;
+	    bo_gem->reloc_count = 0;
+
+	    bo_gem->next = NULL;
+	    *bucket->tail = bo_gem;
+	    bucket->tail = &bo_gem->next;
+	    bucket->num_entries++;
+	} else {
+	    dri_gem_bo_free(bo);
+	}
+
+	return;
+    }
+}
+
+static int
+dri_gem_bo_map(dri_bo *bo, int write_enable)
+{
+    dri_bufmgr_gem *bufmgr_gem;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_set_domain set_domain;
+    int ret;
+
+    bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+    /* Allow recursive mapping. Mesa may recursively map buffers with
+     * nested display loops.
+     */
+    if (!bo_gem->mapped) {
+
+	assert(bo->virtual == NULL);
+
+	DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+
+	if (bo_gem->virtual == NULL) {
+	    struct drm_i915_gem_mmap mmap_arg;
+
+	    memset(&mmap_arg, 0, sizeof(mmap_arg));
+	    mmap_arg.handle = bo_gem->gem_handle;
+	    mmap_arg.offset = 0;
+	    mmap_arg.size = bo->size;
+	    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+	    if (ret != 0) {
+		fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
+			__FILE__, __LINE__,
+			bo_gem->gem_handle, bo_gem->name, strerror(errno));
+		return -errno;
+	    }
+	    bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
+	}
+	bo->virtual = bo_gem->virtual;
+	bo_gem->swrast = 0;
+	bo_gem->mapped = 1;
+	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+	    bo_gem->virtual);
+    }
+
+    if (!bo_gem->swrast) {
+	set_domain.handle = bo_gem->gem_handle;
+	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+	if (write_enable)
+	    set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+	else
+	    set_domain.write_domain = 0;
+	do {
+	    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+			&set_domain);
+	} while (ret == -1 && errno == EINTR);
+	if (ret != 0) {
+	    fprintf(stderr, "%s:%d: Error setting swrast %d: %s\n",
+		    __FILE__, __LINE__, bo_gem->gem_handle, strerror(errno));
+	}
+	bo_gem->swrast = 1;
+    }
+
+    return 0;
+}
+
+static int
+dri_gem_bo_unmap(dri_bo *bo)
+{
+    dri_bufmgr_gem *bufmgr_gem;
+    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+    struct drm_i915_gem_sw_finish sw_finish;
+    int ret;
+
+    if (bo == NULL)
+	return 0;
+
+    bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+    assert(bo_gem->mapped);
+
+    if (bo_gem->swrast) {
+	sw_finish.handle = bo_gem->gem_handle;
+	do {
+	    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
+			&sw_finish);
+	} while (ret == -1 && errno == EINTR);
+	bo_gem->swrast = 0;
+    }
+    return 0;
+}
+
+static int
+dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
+		    unsigned long size, const void 
*data) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    struct drm_i915_gem_pwrite pwrite; +    int ret; + +    memset (&pwrite, 0, sizeof (pwrite)); +    pwrite.handle = bo_gem->gem_handle; +    pwrite.offset = offset; +    pwrite.size = size; +    pwrite.data_ptr = (uint64_t) (uintptr_t) data; +    do { +	ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite); +    } while (ret == -1 && errno == EINTR); +    if (ret != 0) { +	fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n", +		 __FILE__, __LINE__, +		 bo_gem->gem_handle, (int) offset, (int) size, +		 strerror (errno)); +    } +    return 0; +} + +static int +dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset, +			unsigned long size, void *data) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    struct drm_i915_gem_pread pread; +    int ret; + +    memset (&pread, 0, sizeof (pread)); +    pread.handle = bo_gem->gem_handle; +    pread.offset = offset; +    pread.size = size; +    pread.data_ptr = (uint64_t) (uintptr_t) data; +    do { +	ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread); +    } while (ret == -1 && errno == EINTR); +    if (ret != 0) { +	fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n", +		 __FILE__, __LINE__, +		 bo_gem->gem_handle, (int) offset, (int) size, +		 strerror (errno)); +    } +    return 0; +} + +static void +dri_gem_bo_wait_rendering(dri_bo *bo) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    struct drm_i915_gem_set_domain set_domain; +    int ret; + +    set_domain.handle = bo_gem->gem_handle; +    set_domain.read_domains = I915_GEM_DOMAIN_GTT; +    set_domain.write_domain = 0; +    ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); +    if (ret != 0) { +	fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", +		 __FILE__, __LINE__, +		 bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain, +		 strerror (errno)); +    } +} + +static void +dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; +    int i; + +    free(bufmgr_gem->exec_objects); +    free(bufmgr_gem->exec_bos); + +    /* Free any cached buffer objects we were going to reuse */ +    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) { +	struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i]; +	dri_bo_gem *bo_gem; + +	while ((bo_gem = bucket->head) != NULL) { +	    bucket->head = bo_gem->next; +	    if (bo_gem->next == NULL) +		bucket->tail = &bucket->head; +	    bucket->num_entries--; + +	    dri_gem_bo_free(&bo_gem->bo); +	} +    } + +    free(bufmgr); +} + +/** + * Adds the target buffer to the validation list and adds the relocation + * to the reloc_buffer's relocation list. + * + * The relocation entry at the given offset must already contain the + * precomputed relocation value, because the kernel will optimize out + * the relocation entry write when the buffer hasn't moved from the + * last known offset in target_bo. 
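+ *
+ * As an illustrative sketch (the batch map and index are hypothetical),
+ * a caller writes the presumed value first and then records the
+ * relocation:
+ *
+ *   batch[i] = target_bo->offset + delta;
+ *   intel_bo_emit_reloc(batch_bo, I915_GEM_DOMAIN_RENDER, 0,
+ *                       delta, i * 4, target_bo);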
+ */ +static int +dri_gem_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain, +		   uint32_t delta, uint32_t offset, dri_bo *target_bo) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo; + +    /* Create a new relocation list if needed */ +    if (bo_gem->relocs == NULL) +	intel_setup_reloc_list(bo); + +    /* Check overflow */ +    assert(bo_gem->reloc_count < bufmgr_gem->max_relocs); + +    /* Check args */ +    assert (offset <= bo->size - 4); +    assert ((write_domain & (write_domain-1)) == 0); + +    bo_gem->relocs[bo_gem->reloc_count].offset = offset; +    bo_gem->relocs[bo_gem->reloc_count].delta = delta; +    bo_gem->relocs[bo_gem->reloc_count].target_handle = +	target_bo_gem->gem_handle; +    bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; +    bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; +    bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset; + +    bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo; +    dri_bo_reference(target_bo); + +    bo_gem->reloc_count++; +    return 0; +} + +/** + * Walk the tree of relocations rooted at BO and accumulate the list of + * validations to be performed and update the relocation buffers with + * index values into the validation list. + */ +static void +dri_gem_bo_process_reloc(dri_bo *bo) +{ +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    int i; + +    if (bo_gem->relocs == NULL) +	return; + +    for (i = 0; i < bo_gem->reloc_count; i++) { +	dri_bo *target_bo = bo_gem->reloc_target_bo[i]; + +	/* Continue walking the tree depth-first. */ +	dri_gem_bo_process_reloc(target_bo); + +	/* Add the target to the validate list */ +	intel_add_validate_buffer(target_bo); +    } +} + +static void * +dri_gem_process_reloc(dri_bo *batch_buf) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr; + +    /* Update indices and set up the validate list. */ +    dri_gem_bo_process_reloc(batch_buf); + +    /* Add the batch buffer to the validation list.  There are no relocations +     * pointing to it. 
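+     *
+     * Because dri_gem_bo_process_reloc() above walks the targets
+     * depth-first, every relocation target lands in the list before the
+     * batch, so the batch always occupies the final exec_objects slot,
+     * which is where the kernel's execbuffer expects to find it.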
+     */ +    intel_add_validate_buffer(batch_buf); + +    bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects; +    bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count; +    bufmgr_gem->exec_arg.batch_start_offset = 0; +    bufmgr_gem->exec_arg.batch_len = 0;	/* written in intel_exec_ioctl */ + +    return &bufmgr_gem->exec_arg; +} + +static void +intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem) +{ +    int i; + +    for (i = 0; i < bufmgr_gem->exec_count; i++) { +	dri_bo *bo = bufmgr_gem->exec_bos[i]; +	dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + +	/* Update the buffer offset */ +	if (bufmgr_gem->exec_objects[i].offset != bo->offset) { +	    DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", +		bo_gem->gem_handle, bo_gem->name, bo->offset, +		bufmgr_gem->exec_objects[i].offset); +	    bo->offset = bufmgr_gem->exec_objects[i].offset; +	} +    } +} + +static void +dri_gem_post_submit(dri_bo *batch_buf) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr; +    int i; + +    intel_update_buffer_offsets (bufmgr_gem); + +    if (bufmgr_gem->bufmgr.debug) +	dri_gem_dump_validation_list(bufmgr_gem); + +    for (i = 0; i < bufmgr_gem->exec_count; i++) { +	dri_bo *bo = bufmgr_gem->exec_bos[i]; +	dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + +	/* Need to call swrast on next bo_map */ +	bo_gem->swrast = 0; + +	/* Disconnect the buffer from the validate list */ +	bo_gem->validate_index = -1; +	dri_bo_unreference(bo); +	bufmgr_gem->exec_bos[i] = NULL; +    } +    bufmgr_gem->exec_count = 0; +} + +static int +dri_gem_pin(dri_bo *bo, uint32_t alignment) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    struct drm_i915_gem_pin pin; +    int ret; + +    pin.handle = bo_gem->gem_handle; +    pin.alignment = alignment; + +    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin); +    if (ret != 0) +	return -errno; + +    bo->offset = pin.offset; +    return 0; +} + +static int +dri_gem_unpin(dri_bo *bo) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    struct drm_i915_gem_unpin unpin; +    int ret; + +    unpin.handle = bo_gem->gem_handle; + +    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin); +    if (ret != 0) +	return -errno; + +    return 0; +} + +static int +dri_gem_set_tiling(dri_bo *bo, uint32_t *tiling_mode) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    struct drm_i915_gem_set_tiling set_tiling; +    int ret; + +    set_tiling.handle = bo_gem->gem_handle; +    set_tiling.tiling_mode = *tiling_mode; + +    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling); +    if (ret != 0) { +	*tiling_mode = I915_TILING_NONE; +	return -errno; +    } + +    *tiling_mode = set_tiling.tiling_mode; +    return 0; +} + +static int +dri_gem_flink(dri_bo *bo, uint32_t *name) +{ +    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; +    dri_bo_gem *bo_gem = (dri_bo_gem *)bo; +    struct drm_gem_flink flink; +    int ret; + +    if (!bo_gem->global_name) { +	flink.handle = bo_gem->gem_handle; +     +	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink); +	if (ret != 0) +	    return -errno; +	bo_gem->global_name = flink.name; +    } +     +    *name = bo_gem->global_name; +    return 0; +} + +/** + * Enables unlimited caching of buffer objects for reuse. 
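+ *
+ * A typical caller (the fd and batch size here are hypothetical) flips
+ * this on right after creating the manager:
+ *
+ *   bufmgr = intel_bufmgr_gem_init(fd, 16384);
+ *   intel_bufmgr_gem_enable_reuse(bufmgr);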
+ *
+ * This is potentially very memory expensive, as the cache at each bucket
+ * size is only bounded by how many buffers of that size we've managed to
+ * have in flight at once.
+ */
+void
+intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr)
+{
+    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
+    int i;
+
+    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
+	bufmgr_gem->cache_bucket[i].max_entries = -1;
+    }
+}
+
+/*
+ * The kernel does the aperture accounting for GEM, so there is nothing
+ * for us to check here.
+ */
+static int
+dri_gem_check_aperture_space(dri_bo **bo_array, int count)
+{
+    return 0;
+}
+
+/**
+ * Initializes the GEM buffer manager, which uses the kernel to allocate,
+ * map, and manage buffer objects.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ */
+dri_bufmgr *
+intel_bufmgr_gem_init(int fd, int batch_size)
+{
+    dri_bufmgr_gem *bufmgr_gem;
+    int i;
+
+    bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+    if (bufmgr_gem == NULL)
+	return NULL;
+
+    bufmgr_gem->fd = fd;
+
+    /* Let's go with one relocation per every 2 dwords (but round down a bit
+     * since a power of two will mean an extra page allocation for the reloc
+     * buffer).
+     *
+     * Every 4 was too few for the blender benchmark.
+     */
+    bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
+
+    bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
+    bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
+    bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
+    bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
+    bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
+    bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
+    bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
+    bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
+    bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
+    bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
+    bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
+    bufmgr_gem->bufmgr.debug = 0;
+    bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
+    bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc;
+    bufmgr_gem->intel_bufmgr.pin = dri_gem_pin;
+    bufmgr_gem->intel_bufmgr.unpin = dri_gem_unpin;
+    bufmgr_gem->intel_bufmgr.set_tiling = dri_gem_set_tiling;
+    bufmgr_gem->intel_bufmgr.flink = dri_gem_flink;
+    /* Initialize the linked lists for BO reuse cache.
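+     * Each bucket is a singly-linked FIFO: head points at the oldest
+     * cached buffer, and tail at the pointer the next insertion is
+     * stitched into, so an empty bucket is simply tail == &head.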
*/ +    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) +	bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head; + +    return &bufmgr_gem->bufmgr; +} + +int +intel_bo_emit_reloc(dri_bo *reloc_buf, +		    uint32_t read_domains, uint32_t write_domain, +		    uint32_t delta, uint32_t offset, dri_bo *target_buf) +{ +    struct intel_bufmgr *intel_bufmgr; + +    intel_bufmgr = (struct intel_bufmgr *)(reloc_buf->bufmgr + 1); + +    return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain, +				    delta, offset, target_buf); +} + +int +intel_bo_pin(dri_bo *bo, uint32_t alignment) +{ +    struct intel_bufmgr *intel_bufmgr; + +    intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1); + +    if (intel_bufmgr->pin) +	return intel_bufmgr->pin(bo, alignment); + +    return 0; +} + +int +intel_bo_unpin(dri_bo *bo) +{ +    struct intel_bufmgr *intel_bufmgr; + +    intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1); + +    if (intel_bufmgr->unpin) +	return intel_bufmgr->unpin(bo); + +    return 0; +} + +int intel_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode) +{ +    struct intel_bufmgr *intel_bufmgr; + +    intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1); + +    if (intel_bufmgr->set_tiling) +	return intel_bufmgr->set_tiling (bo, tiling_mode); + +    *tiling_mode = I915_TILING_NONE; +    return 0; +} + +int intel_bo_flink(dri_bo *bo, uint32_t *name) +{ +    struct intel_bufmgr *intel_bufmgr; + +    intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1); + +    if (intel_bufmgr->flink) +	return intel_bufmgr->flink (bo, name); + +    return -ENODEV; +} + diff --git a/libdrm/intel/mm.c b/libdrm/intel/mm.c new file mode 100644 index 00000000..98146405 --- /dev/null +++ b/libdrm/intel/mm.c @@ -0,0 +1,281 @@ +/* + * GLX Hardware Device Driver common code + * Copyright (C) 1999 Wittawat Yamwong + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * WITTAWAT YAMWONG, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,  + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE  + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <stdlib.h> +#include <assert.h> + +#include "xf86drm.h" +#include "mm.h" + +void +mmDumpMemInfo(const struct mem_block *heap) +{ +   drmMsg("Memory heap %p:\n", (void *)heap); +   if (heap == 0) { +      drmMsg("  heap == 0\n"); +   } else { +      const struct mem_block *p; + +      for(p = heap->next; p != heap; p = p->next) { +	 drmMsg("  Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size, +		p->free ? 'F':'.', +		p->reserved ? 
'R':'.'); +      } + +      drmMsg("\nFree list:\n"); + +      for(p = heap->next_free; p != heap; p = p->next_free) { +	 drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size, +		p->free ? 'F':'.', +		p->reserved ? 'R':'.'); +      } + +   } +   drmMsg("End of memory blocks\n"); +} + +struct mem_block * +mmInit(int ofs, int size) +{ +   struct mem_block *heap, *block; +   +   if (size <= 0)  +      return NULL; + +   heap = (struct mem_block *) calloc(1, sizeof(struct mem_block)); +   if (!heap)  +      return NULL; +    +   block = (struct mem_block *) calloc(1, sizeof(struct mem_block)); +   if (!block) { +      free(heap); +      return NULL; +   } + +   heap->next = block; +   heap->prev = block; +   heap->next_free = block; +   heap->prev_free = block; + +   block->heap = heap; +   block->next = heap; +   block->prev = heap; +   block->next_free = heap; +   block->prev_free = heap; + +   block->ofs = ofs; +   block->size = size; +   block->free = 1; + +   return heap; +} + + +static struct mem_block * +SliceBlock(struct mem_block *p,  +           int startofs, int size,  +           int reserved, int alignment) +{ +   struct mem_block *newblock; + +   /* break left  [p, newblock, p->next], then p = newblock */ +   if (startofs > p->ofs) { +      newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block)); +      if (!newblock) +	 return NULL; +      newblock->ofs = startofs; +      newblock->size = p->size - (startofs - p->ofs); +      newblock->free = 1; +      newblock->heap = p->heap; + +      newblock->next = p->next; +      newblock->prev = p; +      p->next->prev = newblock; +      p->next = newblock; + +      newblock->next_free = p->next_free; +      newblock->prev_free = p; +      p->next_free->prev_free = newblock; +      p->next_free = newblock; + +      p->size -= newblock->size; +      p = newblock; +   } + +   /* break right, also [p, newblock, p->next] */ +   if (size < p->size) { +      newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block)); +      if (!newblock) +	 return NULL; +      newblock->ofs = startofs + size; +      newblock->size = p->size - size; +      newblock->free = 1; +      newblock->heap = p->heap; + +      newblock->next = p->next; +      newblock->prev = p; +      p->next->prev = newblock; +      p->next = newblock; + +      newblock->next_free = p->next_free; +      newblock->prev_free = p; +      p->next_free->prev_free = newblock; +      p->next_free = newblock; +	  +      p->size = size; +   } + +   /* p = middle block */ +   p->free = 0; + +   /* Remove p from the free list:  +    */ +   p->next_free->prev_free = p->prev_free; +   p->prev_free->next_free = p->next_free; + +   p->next_free = 0; +   p->prev_free = 0; + +   p->reserved = reserved; +   return p; +} + + +struct mem_block * +mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch) +{ +   struct mem_block *p; +   const int mask = (1 << align2)-1; +   int startofs = 0; +   int endofs; + +   if (!heap || align2 < 0 || size <= 0) +      return NULL; + +   for (p = heap->next_free; p != heap; p = p->next_free) { +      assert(p->free); + +      startofs = (p->ofs + mask) & ~mask; +      if ( startofs < startSearch ) { +	 startofs = startSearch; +      } +      endofs = startofs+size; +      if (endofs <= (p->ofs+p->size)) +	 break; +   } + +   if (p == heap)  +      return NULL; + +   assert(p->free); +   p = SliceBlock(p,startofs,size,0,mask+1); + +   return p; +} + + +struct mem_block * +mmFindBlock(struct mem_block *heap, int start) +{ +   
struct mem_block *p; + +   for (p = heap->next; p != heap; p = p->next) { +      if (p->ofs == start)  +	 return p; +   } + +   return NULL; +} + + +static int +Join2Blocks(struct mem_block *p) +{ +   /* XXX there should be some assertions here */ + +   /* NOTE: heap->free == 0 */ + +   if (p->free && p->next->free) { +      struct mem_block *q = p->next; + +      assert(p->ofs + p->size == q->ofs); +      p->size += q->size; + +      p->next = q->next; +      q->next->prev = p; + +      q->next_free->prev_free = q->prev_free;  +      q->prev_free->next_free = q->next_free; +      +      free(q); +      return 1; +   } +   return 0; +} + +int +mmFreeMem(struct mem_block *b) +{ +   if (!b) +      return 0; + +   if (b->free) { +      drmMsg("block already free\n"); +      return -1; +   } +   if (b->reserved) { +      drmMsg("block is reserved\n"); +      return -1; +   } + +   b->free = 1; +   b->next_free = b->heap->next_free; +   b->prev_free = b->heap; +   b->next_free->prev_free = b; +   b->prev_free->next_free = b; + +   Join2Blocks(b); +   if (b->prev != b->heap) +      Join2Blocks(b->prev); + +   return 0; +} + + +void +mmDestroy(struct mem_block *heap) +{ +   struct mem_block *p; + +   if (!heap) +      return; + +   for (p = heap->next; p != heap; ) { +      struct mem_block *next = p->next; +      free(p); +      p = next; +   } + +   free(heap); +} diff --git a/libdrm/intel/mm.h b/libdrm/intel/mm.h new file mode 100644 index 00000000..49e3eecc --- /dev/null +++ b/libdrm/intel/mm.h @@ -0,0 +1,96 @@ +/* + * GLX Hardware Device Driver common code + * Copyright (C) 1999 Wittawat Yamwong + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * KEITH WHITWELL, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,  + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE  + * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + + +/** + * Memory manager code.  Primarily used by device drivers to manage texture + * heaps, etc. + */ + + +#ifndef MM_H +#define MM_H + +struct mem_block { +   struct mem_block *next, *prev; +   struct mem_block *next_free, *prev_free; +   struct mem_block *heap; +   int ofs,size; +   unsigned int free:1; +   unsigned int reserved:1; +}; + +/* Rename the variables in the drm copy of this code so that it doesn't + * conflict with mesa or whoever else has copied it around. 
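+ *
+ * Callers keep writing the plain mmInit()/mmAllocMem() names; the
+ * #defines below simply rewrite them to drm_-prefixed symbols at
+ * compile time, so only the linked symbol names change.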
+ */
+#define mmInit drm_mmInit
+#define mmAllocMem drm_mmAllocMem
+#define mmFreeMem drm_mmFreeMem
+#define mmFindBlock drm_mmFindBlock
+#define mmDestroy drm_mmDestroy
+#define mmDumpMemInfo drm_mmDumpMemInfo
+
+/**
+ * input: total size in bytes
+ * return: a heap pointer if OK, NULL if error
+ */
+extern struct mem_block *mmInit(int ofs, int size);
+
+/**
+ * Allocate 'size' bytes with 2^align2 bytes alignment, restricting the
+ * search to free memory after 'startSearch'.  (Depth and back buffers
+ * should be in different 4MB banks to get better page hits if possible.)
+ * input:	size = size of block
+ *       	align2 = 2^align2 bytes alignment
+ *		startSearch = linear offset from start of heap to begin search
+ * return: pointer to the allocated block, 0 if error
+ */
+extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
+				       int align2, int startSearch);
+
+/**
+ * Free a block that was allocated with mmAllocMem().
+ * input: pointer to a block
+ * return: 0 if OK, -1 if error
+ */
+extern int mmFreeMem(struct mem_block *b);
+
+/**
+ * Find the block that starts at the given offset.
+ * input: pointer to a heap, start offset
+ * return: pointer to a block
+ */
+extern struct mem_block *mmFindBlock(struct mem_block *heap, int start);
+
+/**
+ * Destroy the memory manager, freeing all blocks.
+ */
+extern void mmDestroy(struct mem_block *heap);
+
+/**
+ * For debugging purposes.
+ */
+extern void mmDumpMemInfo(const struct mem_block *heap);
+
+#endif
diff --git a/libdrm/libdrm_lists.h b/libdrm/libdrm_lists.h
new file mode 100644
index 00000000..8e23991f
--- /dev/null
+++ b/libdrm/libdrm_lists.h
@@ -0,0 +1,87 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+/*
+ * List macros heavily inspired by the Linux kernel list handling; only
+ * the "safe" iteration loops are provided so far.
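+ *
+ * A minimal usage sketch (the struct and variable names are hypothetical):
+ *
+ *   struct item { int value; drmMMListHead link; };
+ *   drmMMListHead list;
+ *   struct item a;
+ *
+ *   DRMINITLISTHEAD(&list);
+ *   DRMLISTADDTAIL(&a.link, &list);
+ *   struct item *first = DRMLISTENTRY(struct item, list.next, link);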
+ */ + +typedef struct _drmMMListHead +{ +    struct _drmMMListHead *prev; +    struct _drmMMListHead *next; +} drmMMListHead; + +#define DRMINITLISTHEAD(__item)		       \ +  do{					       \ +    (__item)->prev = (__item);		       \ +    (__item)->next = (__item);		       \ +  } while (0) + +#define DRMLISTADD(__item, __list)		\ +  do {						\ +    (__item)->prev = (__list);			\ +    (__item)->next = (__list)->next;		\ +    (__list)->next->prev = (__item);		\ +    (__list)->next = (__item);			\ +  } while (0) + +#define DRMLISTADDTAIL(__item, __list)		\ +  do {						\ +    (__item)->next = (__list);			\ +    (__item)->prev = (__list)->prev;		\ +    (__list)->prev->next = (__item);		\ +    (__list)->prev = (__item);			\ +  } while(0) + +#define DRMLISTDEL(__item)			\ +  do {						\ +    (__item)->prev->next = (__item)->next;	\ +    (__item)->next->prev = (__item)->prev;	\ +  } while(0) + +#define DRMLISTDELINIT(__item)			\ +  do {						\ +    (__item)->prev->next = (__item)->next;	\ +    (__item)->next->prev = (__item)->prev;	\ +    (__item)->next = (__item);			\ +    (__item)->prev = (__item);			\ +  } while(0) + +#define DRMLISTENTRY(__type, __item, __field)   \ +    ((__type *)(((char *) (__item)) - offsetof(__type, __field))) + +#define DRMLISTEMPTY(__item) ((__item)->next == (__item)) + +#define DRMLISTFOREACHSAFE(__item, __temp, __list)			\ +	for ((__item) = (__list)->next, (__temp) = (__item)->next;	\ +	     (__item) != (__list);					\ +	     (__item) = (__temp), (__temp) = (__item)->next) + +#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list)		\ +	for ((__item) = (__list)->prev, (__temp) = (__item)->prev;	\ +	     (__item) != (__list);					\ +	     (__item) = (__temp), (__temp) = (__item)->prev) diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 7b678138..64765339 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -113,7 +113,7 @@ static int drmDebugPrint(const char *format, va_list ap)  static int (*drm_debug_print)(const char *format, va_list ap) = drmDebugPrint; -static void +void  drmMsg(const char *format, ...)  
 {
     va_list	ap;
@@ -174,6 +174,19 @@ static char *drmStrdup(const char *s)
     return retval;
 }
 
+/**
+ * Call ioctl, restarting if it is interrupted
+ */
+static int
+drmIoctl(int fd, int request, void *arg)
+{
+    int	ret;
+
+    do {
+	ret = ioctl(fd, request, arg);
+    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+    return ret;
+}
 
 static unsigned long drmGetKeyFromFd(int fd)
 {
@@ -675,7 +688,7 @@ drmVersionPtr drmGetVersion(int fd)
     version->desc_len    = 0;
     version->desc        = NULL;
 
-    if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+    if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
 	drmFreeKernelVersion(version);
 	return NULL;
     }
@@ -687,7 +700,7 @@ drmVersionPtr drmGetVersion(int fd)
     if (version->desc_len)
 	version->desc    = drmMalloc(version->desc_len + 1);
 
-    if (ioctl(fd, DRM_IOCTL_VERSION, version)) {
+    if (drmIoctl(fd, DRM_IOCTL_VERSION, version)) {
 	drmMsg("DRM_IOCTL_VERSION: %s\n", strerror(errno));
 	drmFreeKernelVersion(version);
 	return NULL;
@@ -773,10 +786,10 @@ char *drmGetBusid(int fd)
     u.unique_len = 0;
     u.unique     = NULL;
 
-    if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+    if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
 	return NULL;
     u.unique = drmMalloc(u.unique_len + 1);
 
-    if (ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
+    if (drmIoctl(fd, DRM_IOCTL_GET_UNIQUE, &u))
 	return NULL;
 
     u.unique[u.unique_len] = '\0';
@@ -803,7 +816,7 @@ int drmSetBusid(int fd, const char *busid)
     u.unique     = (char *)busid;
     u.unique_len = strlen(busid);
 
-    if (ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
+    if (drmIoctl(fd, DRM_IOCTL_SET_UNIQUE, &u)) {
 	return -errno;
     }
     return 0;
@@ -814,7 +827,7 @@ int drmGetMagic(int fd, drm_magic_t * magic)
     drm_auth_t auth;
 
     *magic = 0;
-    if (ioctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
+    if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
 	return -errno;
     *magic = auth.magic;
     return 0;
@@ -825,7 +838,7 @@ int drmAuthMagic(int fd, drm_magic_t magic)
     drm_auth_t auth;
 
     auth.magic = magic;
-    if (ioctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
+    if (drmIoctl(fd, DRM_IOCTL_AUTH_MAGIC, &auth))
 	return -errno;
     return 0;
 }
@@ -890,7 +903,7 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
     map.handle  = 0;
     map.type    = type;
     map.flags   = flags;
-    if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map))
+    if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
 	return -errno;
     if (handle)
 	*handle = (drm_handle_t)map.handle;
@@ -903,7 +916,7 @@ int drmRmMap(int fd, drm_handle_t handle)
 
     map.handle = (void *)handle;
 
-    if(ioctl(fd, DRM_IOCTL_RM_MAP, &map))
+    if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map))
 	return -errno;
     return 0;
 }
@@ -936,7 +949,7 @@ int drmAddBufs(int fd, int count, int size, drmBufDescFlags flags,
     request.flags     = flags;
     request.agp_start = agp_offset;
 
-    if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &request))
+    if (drmIoctl(fd, DRM_IOCTL_ADD_BUFS, &request))
 	return -errno;
     return request.count;
 }
@@ -949,7 +962,7 @@ int drmMarkBufs(int fd, double low, double high)
 
     info.count = 0;
     info.list  = NULL;
 
-    if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))
+    if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))
 	return -EINVAL;
 
     if (!info.count)
@@ -958,7 +971,7 @@ int drmMarkBufs(int fd, double low, double high)
     if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))
 	return -ENOMEM;
 
-    if (ioctl(fd, 
DRM_IOCTL_INFO_BUFS, &info)) {  	int retval = -errno;  	drmFree(info.list);  	return retval; @@ -967,7 +980,7 @@ int drmMarkBufs(int fd, double low, double high)      for (i = 0; i < info.count; i++) {  	info.list[i].low_mark  = low  * info.list[i].count;  	info.list[i].high_mark = high * info.list[i].count; -	if (ioctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) { +	if (drmIoctl(fd, DRM_IOCTL_MARK_BUFS, &info.list[i])) {  	    int retval = -errno;  	    drmFree(info.list);  	    return retval; @@ -999,7 +1012,7 @@ int drmFreeBufs(int fd, int count, int *list)      request.count = count;      request.list  = list; -    if (ioctl(fd, DRM_IOCTL_FREE_BUFS, &request)) +    if (drmIoctl(fd, DRM_IOCTL_FREE_BUFS, &request))  	return -errno;      return 0;  } @@ -1088,14 +1101,14 @@ drmBufInfoPtr drmGetBufInfo(int fd)      info.count = 0;      info.list  = NULL; -    if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) +    if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info))  	return NULL;      if (info.count) {  	if (!(info.list = drmMalloc(info.count * sizeof(*info.list))))  	    return NULL; -	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) { +	if (drmIoctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {  	    drmFree(info.list);  	    return NULL;  	} @@ -1139,7 +1152,7 @@ drmBufMapPtr drmMapBufs(int fd)      bufs.count = 0;      bufs.list  = NULL;      bufs.virtual = NULL; -    if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) +    if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs))  	return NULL;      if (!bufs.count) @@ -1148,7 +1161,7 @@ drmBufMapPtr drmMapBufs(int fd)  	if (!(bufs.list = drmMalloc(bufs.count * sizeof(*bufs.list))))  	    return NULL; -	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) { +	if (drmIoctl(fd, DRM_IOCTL_MAP_BUFS, &bufs)) {  	    drmFree(bufs.list);  	    return NULL;  	} @@ -1263,7 +1276,7 @@ int drmGetLock(int fd, drm_context_t context, drmLockFlags flags)      if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;      if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES; -    while (ioctl(fd, DRM_IOCTL_LOCK, &lock)) +    while (drmIoctl(fd, DRM_IOCTL_LOCK, &lock))  	;      return 0;  } @@ -1286,7 +1299,7 @@ int drmUnlock(int fd, drm_context_t context)      lock.context = context;      lock.flags   = 0; -    return ioctl(fd, DRM_IOCTL_UNLOCK, &lock); +    return drmIoctl(fd, DRM_IOCTL_UNLOCK, &lock);  }  drm_context_t *drmGetReservedContextList(int fd, int *count) @@ -1298,7 +1311,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)      res.count    = 0;      res.contexts = NULL; -    if (ioctl(fd, DRM_IOCTL_RES_CTX, &res)) +    if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))  	return NULL;      if (!res.count) @@ -1312,7 +1325,7 @@ drm_context_t *drmGetReservedContextList(int fd, int *count)      }      res.contexts = list; -    if (ioctl(fd, DRM_IOCTL_RES_CTX, &res)) +    if (drmIoctl(fd, DRM_IOCTL_RES_CTX, &res))  	return NULL;      for (i = 0; i < res.count; i++) @@ -1351,7 +1364,7 @@ int drmCreateContext(int fd, drm_context_t *handle)      drm_ctx_t ctx;      ctx.flags = 0;	/* Modified with functions below */ -    if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx)) +    if (drmIoctl(fd, DRM_IOCTL_ADD_CTX, &ctx))  	return -errno;      *handle = ctx.handle;      return 0; @@ -1362,7 +1375,7 @@ int drmSwitchToContext(int fd, drm_context_t context)      drm_ctx_t ctx;      ctx.handle = context; -    if (ioctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx)) +    if (drmIoctl(fd, DRM_IOCTL_SWITCH_CTX, &ctx))  	return -errno;      return 0;  } @@ -1383,7 +1396,7 @@ int drmSetContextFlags(int fd, 
drm_context_t context, drm_context_tFlags flags)  	ctx.flags |= _DRM_CONTEXT_PRESERVED;      if (flags & DRM_CONTEXT_2DONLY)  	ctx.flags |= _DRM_CONTEXT_2DONLY; -    if (ioctl(fd, DRM_IOCTL_MOD_CTX, &ctx)) +    if (drmIoctl(fd, DRM_IOCTL_MOD_CTX, &ctx))  	return -errno;      return 0;  } @@ -1394,7 +1407,7 @@ int drmGetContextFlags(int fd, drm_context_t context,      drm_ctx_t ctx;      ctx.handle = context; -    if (ioctl(fd, DRM_IOCTL_GET_CTX, &ctx)) +    if (drmIoctl(fd, DRM_IOCTL_GET_CTX, &ctx))  	return -errno;      *flags = 0;      if (ctx.flags & _DRM_CONTEXT_PRESERVED) @@ -1425,7 +1438,7 @@ int drmDestroyContext(int fd, drm_context_t handle)  {      drm_ctx_t ctx;      ctx.handle = handle; -    if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx)) +    if (drmIoctl(fd, DRM_IOCTL_RM_CTX, &ctx))  	return -errno;      return 0;  } @@ -1433,7 +1446,7 @@ int drmDestroyContext(int fd, drm_context_t handle)  int drmCreateDrawable(int fd, drm_drawable_t *handle)  {      drm_draw_t draw; -    if (ioctl(fd, DRM_IOCTL_ADD_DRAW, &draw)) +    if (drmIoctl(fd, DRM_IOCTL_ADD_DRAW, &draw))  	return -errno;      *handle = draw.handle;      return 0; @@ -1443,7 +1456,7 @@ int drmDestroyDrawable(int fd, drm_drawable_t handle)  {      drm_draw_t draw;      draw.handle = handle; -    if (ioctl(fd, DRM_IOCTL_RM_DRAW, &draw)) +    if (drmIoctl(fd, DRM_IOCTL_RM_DRAW, &draw))  	return -errno;      return 0;  } @@ -1459,7 +1472,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,      update.num = num;      update.data = (unsigned long long)(unsigned long)data; -    if (ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update)) +    if (drmIoctl(fd, DRM_IOCTL_UPDATE_DRAW, &update))  	return -errno;      return 0; @@ -1479,7 +1492,7 @@ int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,   */  int drmAgpAcquire(int fd)  { -    if (ioctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_ACQUIRE, NULL))  	return -errno;      return 0;  } @@ -1497,7 +1510,7 @@ int drmAgpAcquire(int fd)   */  int drmAgpRelease(int fd)  { -    if (ioctl(fd, DRM_IOCTL_AGP_RELEASE, NULL)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_RELEASE, NULL))  	return -errno;      return 0;  } @@ -1520,7 +1533,7 @@ int drmAgpEnable(int fd, unsigned long mode)      drm_agp_mode_t m;      m.mode = mode; -    if (ioctl(fd, DRM_IOCTL_AGP_ENABLE, &m)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_ENABLE, &m))  	return -errno;      return 0;  } @@ -1551,7 +1564,7 @@ int drmAgpAlloc(int fd, unsigned long size, unsigned long type,      b.size   = size;      b.handle = 0;      b.type   = type; -    if (ioctl(fd, DRM_IOCTL_AGP_ALLOC, &b)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_ALLOC, &b))  	return -errno;      if (address != 0UL)  	*address = b.physical; @@ -1578,7 +1591,7 @@ int drmAgpFree(int fd, drm_handle_t handle)      b.size   = 0;      b.handle = handle; -    if (ioctl(fd, DRM_IOCTL_AGP_FREE, &b)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_FREE, &b))  	return -errno;      return 0;  } @@ -1603,7 +1616,7 @@ int drmAgpBind(int fd, drm_handle_t handle, unsigned long offset)      b.handle = handle;      b.offset = offset; -    if (ioctl(fd, DRM_IOCTL_AGP_BIND, &b)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_BIND, &b))  	return -errno;      return 0;  } @@ -1627,7 +1640,7 @@ int drmAgpUnbind(int fd, drm_handle_t handle)      b.handle = handle;      b.offset = 0; -    if (ioctl(fd, DRM_IOCTL_AGP_UNBIND, &b)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_UNBIND, &b))  	return -errno;      return 0;  } @@ -1648,7 +1661,7 @@ int drmAgpVersionMajor(int fd)  {      drm_agp_info_t i; -    if 
(ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))  	return -errno;      return i.agp_version_major;  } @@ -1669,7 +1682,7 @@ int drmAgpVersionMinor(int fd)  {      drm_agp_info_t i; -    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))  	return -errno;      return i.agp_version_minor;  } @@ -1690,7 +1703,7 @@ unsigned long drmAgpGetMode(int fd)  {      drm_agp_info_t i; -    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))  	return 0;      return i.mode;  } @@ -1711,7 +1724,7 @@ unsigned long drmAgpBase(int fd)  {      drm_agp_info_t i; -    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))  	return 0;      return i.aperture_base;  } @@ -1732,7 +1745,7 @@ unsigned long drmAgpSize(int fd)  {      drm_agp_info_t i; -    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))  	return 0;      return i.aperture_size;  } @@ -1753,7 +1766,7 @@ unsigned long drmAgpMemoryUsed(int fd)  {      drm_agp_info_t i; -    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))  	return 0;      return i.memory_used;  } @@ -1774,7 +1787,7 @@ unsigned long drmAgpMemoryAvail(int fd)  {      drm_agp_info_t i; -    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))  	return 0;      return i.memory_allowed;  } @@ -1795,7 +1808,7 @@ unsigned int drmAgpVendorId(int fd)  {      drm_agp_info_t i; -    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))  	return 0;      return i.id_vendor;  } @@ -1816,7 +1829,7 @@ unsigned int drmAgpDeviceId(int fd)  {      drm_agp_info_t i; -    if (ioctl(fd, DRM_IOCTL_AGP_INFO, &i)) +    if (drmIoctl(fd, DRM_IOCTL_AGP_INFO, &i))  	return 0;      return i.id_device;  } @@ -1828,7 +1841,7 @@ int drmScatterGatherAlloc(int fd, unsigned long size, drm_handle_t *handle)      *handle = 0;      sg.size   = size;      sg.handle = 0; -    if (ioctl(fd, DRM_IOCTL_SG_ALLOC, &sg)) +    if (drmIoctl(fd, DRM_IOCTL_SG_ALLOC, &sg))  	return -errno;      *handle = sg.handle;      return 0; @@ -1840,7 +1853,7 @@ int drmScatterGatherFree(int fd, drm_handle_t handle)      sg.size   = 0;      sg.handle = handle; -    if (ioctl(fd, DRM_IOCTL_SG_FREE, &sg)) +    if (drmIoctl(fd, DRM_IOCTL_SG_FREE, &sg))  	return -errno;      return 0;  } @@ -1861,7 +1874,7 @@ int drmWaitVBlank(int fd, drmVBlankPtr vbl)      int ret;      do { -       ret = ioctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl); +       ret = drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, vbl);         vbl->request.type &= ~DRM_VBLANK_RELATIVE;      } while (ret && errno == EINTR); @@ -1911,7 +1924,7 @@ int drmCtlInstHandler(int fd, int irq)      ctl.func  = DRM_INST_HANDLER;      ctl.irq   = irq; -    if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl)) +    if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))  	return -errno;      return 0;  } @@ -1934,7 +1947,7 @@ int drmCtlUninstHandler(int fd)      ctl.func  = DRM_UNINST_HANDLER;      ctl.irq   = 0; -    if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl)) +    if (drmIoctl(fd, DRM_IOCTL_CONTROL, &ctl))  	return -errno;      return 0;  } @@ -1951,7 +1964,7 @@ int drmFinish(int fd, int context, drmLockFlags flags)      if (flags & DRM_LOCK_FLUSH_ALL)  lock.flags |= _DRM_LOCK_FLUSH_ALL;      if (flags & DRM_HALT_ALL_QUEUES) lock.flags |= _DRM_HALT_ALL_QUEUES;      if (flags & DRM_HALT_CUR_QUEUES) lock.flags |= _DRM_HALT_CUR_QUEUES; -    if (ioctl(fd, DRM_IOCTL_FINISH, &lock)) +    
if (drmIoctl(fd, DRM_IOCTL_FINISH, &lock))  	return -errno;      return 0;  } @@ -1977,7 +1990,7 @@ int drmGetInterruptFromBusID(int fd, int busnum, int devnum, int funcnum)      p.busnum  = busnum;      p.devnum  = devnum;      p.funcnum = funcnum; -    if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &p)) +    if (drmIoctl(fd, DRM_IOCTL_IRQ_BUSID, &p))  	return -errno;      return p.irq;  } @@ -2019,7 +2032,7 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,      map.ctx_id = ctx_id;      map.handle = (void *)handle; -    if (ioctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map)) +    if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))  	return -errno;      return 0;  } @@ -2031,7 +2044,7 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,      map.ctx_id = ctx_id; -    if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map)) +    if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))  	return -errno;      if (handle)  	*handle = (drm_handle_t)map.handle; @@ -2046,7 +2059,7 @@ int drmGetMap(int fd, int idx, drm_handle_t *offset, drmSize *size,      drm_map_t map;      map.offset = idx; -    if (ioctl(fd, DRM_IOCTL_GET_MAP, &map)) +    if (drmIoctl(fd, DRM_IOCTL_GET_MAP, &map))  	return -errno;      *offset = map.offset;      *size   = map.size; @@ -2063,7 +2076,7 @@ int drmGetClient(int fd, int idx, int *auth, int *pid, int *uid,      drm_client_t client;      client.idx = idx; -    if (ioctl(fd, DRM_IOCTL_GET_CLIENT, &client)) +    if (drmIoctl(fd, DRM_IOCTL_GET_CLIENT, &client))  	return -errno;      *auth      = client.auth;      *pid       = client.pid; @@ -2078,7 +2091,7 @@ int drmGetStats(int fd, drmStatsT *stats)      drm_stats_t s;      int         i; -    if (ioctl(fd, DRM_IOCTL_GET_STATS, &s)) +    if (drmIoctl(fd, DRM_IOCTL_GET_STATS, &s))  	return -errno;      stats->count = 0; @@ -2220,7 +2233,7 @@ int drmSetInterfaceVersion(int fd, drmSetVersion *version)      sv.drm_dd_major = version->drm_dd_major;      sv.drm_dd_minor = version->drm_dd_minor; -    if (ioctl(fd, DRM_IOCTL_SET_VERSION, &sv)) { +    if (drmIoctl(fd, DRM_IOCTL_SET_VERSION, &sv)) {  	retcode = -errno;      } @@ -2251,7 +2264,7 @@ int drmCommandNone(int fd, unsigned long drmCommandIndex)      request = DRM_IO( DRM_COMMAND_BASE + drmCommandIndex); -    if (ioctl(fd, request, data)) { +    if (drmIoctl(fd, request, data)) {  	return -errno;      }      return 0; @@ -2280,7 +2293,7 @@ int drmCommandRead(int fd, unsigned long drmCommandIndex, void *data,      request = DRM_IOC( DRM_IOC_READ, DRM_IOCTL_BASE,   	DRM_COMMAND_BASE + drmCommandIndex, size); -    if (ioctl(fd, request, data)) { +    if (drmIoctl(fd, request, data)) {  	return -errno;      }      return 0; @@ -2309,7 +2322,7 @@ int drmCommandWrite(int fd, unsigned long drmCommandIndex, void *data,      request = DRM_IOC( DRM_IOC_WRITE, DRM_IOCTL_BASE,   	DRM_COMMAND_BASE + drmCommandIndex, size); -    if (ioctl(fd, request, data)) { +    if (drmIoctl(fd, request, data)) {  	return -errno;      }      return 0; @@ -2338,585 +2351,11 @@ int drmCommandWriteRead(int fd, unsigned long drmCommandIndex, void *data,      request = DRM_IOC( DRM_IOC_READ|DRM_IOC_WRITE, DRM_IOCTL_BASE,   	DRM_COMMAND_BASE + drmCommandIndex, size); -    if (ioctl(fd, request, data)) { -	return -errno; -    } -    return 0; -} - - -/* - * Valid flags are  - * DRM_FENCE_FLAG_EMIT - * DRM_FENCE_FLAG_SHAREABLE - * DRM_FENCE_MASK_DRIVER - */ - -int drmFenceCreate(int fd, unsigned flags, int fence_class, unsigned type, -		   drmFence *fence) -{ -    drm_fence_arg_t arg; - -    memset(&arg, 0, sizeof(arg)); 
-    arg.flags = flags; -    arg.type = type; -    arg.fence_class = fence_class; - -    if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg)) -	return -errno; -    fence->handle = arg.handle; -    fence->fence_class = arg.fence_class; -    fence->type = arg.type; -    fence->flags = arg.flags; -    fence->signaled = 0; -    return 0; -} - -/* - * Valid flags are  - * DRM_FENCE_FLAG_SHAREABLE - * DRM_FENCE_MASK_DRIVER - */ - -int drmFenceBuffers(int fd, unsigned flags, uint32_t fence_class, drmFence *fence) -{ -    drm_fence_arg_t arg; - -    memset(&arg, 0, sizeof(arg)); -    arg.flags = flags; -    arg.fence_class = fence_class; - -    if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg)) -	return -errno; -    fence->handle = arg.handle; -    fence->fence_class = arg.fence_class; -    fence->type = arg.type; -    fence->flags = arg.flags; -    fence->sequence = arg.sequence; -    fence->signaled = 0; -    return 0; -} - -int drmFenceReference(int fd, unsigned handle, drmFence *fence) -{ -    drm_fence_arg_t arg; - -    memset(&arg, 0, sizeof(arg)); -    arg.handle = handle; - -    if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg)) -	return -errno; -    fence->handle = arg.handle; -    fence->fence_class = arg.fence_class; -    fence->type = arg.type; -    fence->flags = arg.flags; -    fence->signaled = arg.signaled; -    return 0; -} - -int drmFenceUnreference(int fd, const drmFence *fence) -{ -    drm_fence_arg_t arg; - -    memset(&arg, 0, sizeof(arg)); -    arg.handle = fence->handle; - -    if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg)) -	return -errno; -    return 0; -} - -int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type) -{ -    drm_fence_arg_t arg; - -    memset(&arg, 0, sizeof(arg)); -    arg.handle = fence->handle; -    arg.type = flush_type; - -    if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg)) -	return -errno; -    fence->fence_class = arg.fence_class; -    fence->type = arg.type; -    fence->signaled = arg.signaled; -    return arg.error; -} - -int drmFenceUpdate(int fd, drmFence *fence) -{ -    drm_fence_arg_t arg; - -    memset(&arg, 0, sizeof(arg)); -    arg.handle = fence->handle; - -    if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg)) -	return -errno; -    fence->fence_class = arg.fence_class; -    fence->type = arg.type; -    fence->signaled = arg.signaled; -    return 0; -} - -int drmFenceSignaled(int fd, drmFence *fence, unsigned fenceType,  -		     int *signaled) -{ -    if ((fence->flags & DRM_FENCE_FLAG_SHAREABLE) || -	((fenceType & fence->signaled) != fenceType)) { -	int ret = drmFenceFlush(fd, fence, fenceType); -	if (ret) -	    return ret; -    } - -    *signaled = ((fenceType & fence->signaled) == fenceType); - -    return 0; -} - -/* - * Valid flags are  - * DRM_FENCE_FLAG_SHAREABLE - * DRM_FENCE_MASK_DRIVER - */ - - -int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type) -{ -    drm_fence_arg_t arg; - -    memset(&arg, 0, sizeof(arg)); -    arg.fence_class = fence->fence_class; -    arg.flags = flags; -    arg.handle = fence->handle; -    arg.type = emit_type; - -    if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg)) -	return -errno; -    fence->fence_class = arg.fence_class; -    fence->type = arg.type; -    fence->signaled = arg.signaled; -    fence->sequence = arg.sequence; -    return 0; -} - -/* - * Valid flags are  - * DRM_FENCE_FLAG_WAIT_LAZY - * DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS - */ - -#define DRM_IOCTL_TIMEOUT_USEC 3000000UL - -static unsigned long -drmTimeDiff(struct timeval *now, struct timeval *then) -{ -    uint64_t val; - -    val = 
-static unsigned long
-drmTimeDiff(struct timeval *now, struct timeval *then)
-{
-    uint64_t val;
-
-    val = now->tv_sec - then->tv_sec;
-    val *= 1000000LL;
-    val += now->tv_usec;
-    val -= then->tv_usec;
-
-    return (unsigned long) val;
-}
-
-static int
-drmIoctlTimeout(int fd, unsigned long request, void *argp)
-{
-    int haveThen = 0;
-    struct timeval then, now;
-    int ret;
-
-    do {
-	ret = ioctl(fd, request, argp);
-	if (ret != 0 && errno == EAGAIN) {
-	    if (!haveThen) {
-		gettimeofday(&then, NULL);
-		haveThen = 1;
-	    }
-	    gettimeofday(&now, NULL);
-	}
-    } while (ret != 0 && errno == EAGAIN && 
-	     drmTimeDiff(&now, &then) < DRM_IOCTL_TIMEOUT_USEC);
-    
-    if (ret != 0)
-	return ((errno == EAGAIN) ? -EBUSY : -errno);
-
-    return 0;
-}
-    
-	
-
-
-int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
-{
-    drm_fence_arg_t arg;
-    int ret;
-
-    if (flush_type == 0) {
-	flush_type = fence->type;
-    }
-
-    if (!(fence->flags & DRM_FENCE_FLAG_SHAREABLE)) {
-	if ((flush_type & fence->signaled) == flush_type) {
-	    return 0;
-	}
-    }
-
-    memset(&arg, 0, sizeof(arg));
-    arg.handle = fence->handle;
-    arg.type = flush_type;
-    arg.flags = flags;
-
-
-    ret = drmIoctlTimeout(fd, DRM_IOCTL_FENCE_WAIT, &arg);
-    if (ret)
-	return ret;
-
-    fence->fence_class = arg.fence_class;
-    fence->type = arg.type;
-    fence->signaled = arg.signaled;
-    return arg.error;
-}    
-
-static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
-{
-    buf->handle = rep->handle;
-    buf->flags = rep->flags;
-    buf->size = rep->size;
-    buf->offset = rep->offset;
-    buf->mapHandle = rep->arg_handle;
-    buf->proposedFlags = rep->proposed_flags;
-    buf->start = rep->buffer_start;
-    buf->fenceFlags = rep->fence_flags;
-    buf->replyFlags = rep->rep_flags;
-    buf->pageAlignment = rep->page_alignment;
-    buf->tileInfo = rep->tile_info;
-    buf->hwTileStride = rep->hw_tile_stride;
-    buf->desiredTileStride = rep->desired_tile_stride;
-}
-
-
-
-int drmBOCreate(int fd, unsigned long size,
-		unsigned pageAlignment, void *user_buffer,
-		uint64_t flags,
-		unsigned hint, drmBO *buf)
-{
-    struct drm_bo_create_arg arg;
-    struct drm_bo_create_req *req = &arg.d.req;
-    struct drm_bo_info_rep *rep = &arg.d.rep;
-    int ret;
-
-    memset(buf, 0, sizeof(*buf));
-    memset(&arg, 0, sizeof(arg));
-    req->flags = flags;
-    req->hint = hint;
-    req->size = size;
-    req->page_alignment = pageAlignment;
-    req->buffer_start = (unsigned long) user_buffer;
-
-    buf->virtual = NULL;
-
-    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_CREATE, &arg);
-    if (ret)
-	return ret;
-
-    drmBOCopyReply(rep, buf);
-    buf->virtual = user_buffer;
-    buf->mapCount = 0;
-
-    return 0;
-}
-
-int drmBOReference(int fd, unsigned handle, drmBO *buf)
-{
-    struct drm_bo_reference_info_arg arg;
-    struct drm_bo_handle_arg *req = &arg.d.req;
-    struct drm_bo_info_rep *rep = &arg.d.rep;
-    
-    memset(&arg, 0, sizeof(arg));
-    req->handle = handle;
-    
-    if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
+    if (drmIoctl(fd, request, data))
 	return -errno;
-
-    drmBOCopyReply(rep, buf);
-    buf->mapVirtual = NULL;
-    buf->mapCount = 0;
-    buf->virtual = NULL;
-    
     return 0;
 }
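Note that the removed drmIoctlTimeout() measures its 3-second EAGAIN window with gettimeofday(), i.e. wall-clock time, so a clock step while spinning could stretch or truncate the timeout. A variant of the same loop against the monotonic clock (a sketch under a hypothetical name, not anything this patch adds) avoids that:

    #include <errno.h>
    #include <stdint.h>
    #include <time.h>
    #include <sys/ioctl.h>

    #define DRM_IOCTL_TIMEOUT_USEC 3000000UL

    /* Hypothetical bounded-retry wrapper: same EAGAIN/-EBUSY semantics
     * as the removed drmIoctlTimeout(), timed with CLOCK_MONOTONIC so
     * wall-clock adjustments cannot skew the window. */
    static int
    drmIoctlTimeoutMonotonic(int fd, unsigned long request, void *argp)
    {
        struct timespec start, now;
        int64_t elapsed = 0;
        int ret;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
            ret = ioctl(fd, request, argp);
            if (ret == 0)
                return 0;
            if (errno != EAGAIN)
                return -errno;
            clock_gettime(CLOCK_MONOTONIC, &now);
            elapsed = (int64_t)(now.tv_sec - start.tv_sec) * 1000000 +
                      (now.tv_nsec - start.tv_nsec) / 1000;
        } while (elapsed < (int64_t)DRM_IOCTL_TIMEOUT_USEC);
        return -EBUSY;
    }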
-
-int drmBOUnreference(int fd, drmBO *buf)
-{
-    struct drm_bo_handle_arg arg;
-
-    if (buf->mapVirtual && buf->mapHandle) {
-	(void) munmap(buf->mapVirtual, buf->start + buf->size);
-	buf->mapVirtual = NULL;
-	buf->virtual = NULL;
-    }
-
-    memset(&arg, 0, sizeof(arg));
-    arg.handle = buf->handle;
-
-    if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
-	return -errno;
-
-    buf->handle = 0;
-    return 0;
-}   
-
-
-/*
- * Flags can be  DRM_BO_FLAG_READ, DRM_BO_FLAG_WRITE or'ed together
- * Hint currently be DRM_BO_HINT_DONT_BLOCK, which makes the
- * call return an -EBUSY if it can' immediately honor the mapping request.
- */
-
-int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
-	     void **address)
-{
-    struct drm_bo_map_wait_idle_arg arg;
-    struct drm_bo_info_req *req = &arg.d.req;
-    struct drm_bo_info_rep *rep = &arg.d.rep;
-    int ret = 0;
-
-    /*
-     * Make sure we have a virtual address of the buffer.
-     */
-
-    if (!buf->virtual) {
-	drmAddress virtual;
-	virtual = mmap(0, buf->size + buf->start, 
-		       PROT_READ | PROT_WRITE, MAP_SHARED,
-		       fd, buf->mapHandle);
-	if (virtual == MAP_FAILED) {
-	    ret = -errno;
-	}
-	if (ret) 
-	    return ret;
-	buf->mapVirtual = virtual;
-	buf->virtual = ((char *) virtual) + buf->start;
-    }
-
-    memset(&arg, 0, sizeof(arg));
-    req->handle = buf->handle;
-    req->mask = mapFlags;
-    req->hint = mapHint;
-
-    /*
-     * May hang if the buffer object is busy.
-     * This IOCTL synchronizes the buffer.
-     */
-    
-    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_MAP, &arg);
-    if (ret)
-	return ret;
-
-    drmBOCopyReply(rep, buf);	
-    buf->mapFlags = mapFlags;
-    ++buf->mapCount;
-    *address = buf->virtual;
-
-    return 0;
-}
-
-
-int drmBOUnmap(int fd, drmBO *buf)
-{
-    struct drm_bo_handle_arg arg;
-
-    memset(&arg, 0, sizeof(arg));
-    arg.handle = buf->handle;
-
-    if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
-	return -errno;
-    }
-    buf->mapCount--;
-    return 0;
-}
-
-int drmBOSetStatus(int fd, drmBO *buf, 
-		   uint64_t flags, uint64_t mask,
-		   unsigned int hint, 
-		   unsigned int desired_tile_stride,
-		   unsigned int tile_info)
-{
-
-    struct drm_bo_map_wait_idle_arg arg;
-    struct drm_bo_info_req *req = &arg.d.req;
-    struct drm_bo_info_rep *rep = &arg.d.rep;
-    int ret = 0;
-
-    memset(&arg, 0, sizeof(arg));
-    req->mask = mask;
-    req->flags = flags;
-    req->handle = buf->handle;
-    req->hint = hint;
-    req->desired_tile_stride = desired_tile_stride;
-    req->tile_info = tile_info;
-    
-    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_SETSTATUS, &arg);
-    if (ret) 
-	    return ret;
-
-    drmBOCopyReply(rep, buf);
-    return 0;
-}
-	    
-
-int drmBOInfo(int fd, drmBO *buf)
-{
-    struct drm_bo_reference_info_arg arg;
-    struct drm_bo_handle_arg *req = &arg.d.req;
-    struct drm_bo_info_rep *rep = &arg.d.rep;
-    int ret = 0;
-
-    memset(&arg, 0, sizeof(arg));
-    req->handle = buf->handle;
-
-    ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
-    if (ret) 
-	return -errno;
-
-    drmBOCopyReply(rep, buf);
-    return 0;
-}
-
-int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
-{
-    struct drm_bo_map_wait_idle_arg arg;
-    struct drm_bo_info_req *req = &arg.d.req;
-    struct drm_bo_info_rep *rep = &arg.d.rep;
-    int ret = 0;
-
-    if ((buf->flags & DRM_BO_FLAG_SHAREABLE) ||
-	(buf->replyFlags & DRM_BO_REP_BUSY)) {
-        memset(&arg, 0, sizeof(arg));
-	req->handle = buf->handle;
-	req->hint = hint;
-
-	ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg);
-	if (ret) 
-	    return ret;
-
-	drmBOCopyReply(rep, buf);
-    }
-    return 0;
-}
-
-int drmBOBusy(int fd, drmBO *buf, int *busy)
-{
-    int ret = drmBOInfo(fd, buf);
-
-    if (ret)
-	return ret;
-
-    *busy = (buf->replyFlags & DRM_BO_REP_BUSY);
-    return 0;
-}
-
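The comment on the removed drmBOMap() has a couple of typos; the intent is that mapFlags is DRM_BO_FLAG_READ and DRM_BO_FLAG_WRITE OR'ed together, and that the DRM_BO_HINT_DONT_BLOCK hint makes the call return -EBUSY rather than stall when it cannot immediately honor the mapping request. A hypothetical helper over the removed API would read:

    #include <string.h>

    /* Hypothetical helper using the removed API: map 'buf' for writing,
     * copy 'size' bytes of 'data' into it, then unmap it again. */
    static int
    write_bo(int fd, drmBO *buf, const void *data, size_t size)
    {
        void *ptr;
        int ret;

        /* DRM_BO_HINT_DONT_BLOCK: fail with -EBUSY instead of stalling
         * if the hardware is still using the buffer. */
        ret = drmBOMap(fd, buf, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                       DRM_BO_HINT_DONT_BLOCK, &ptr);
        if (ret == -EBUSY)  /* busy: fall back to the synchronizing map */
            ret = drmBOMap(fd, buf, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                           0, &ptr);
        if (ret)
            return ret;

        memcpy(ptr, data, size);
        return drmBOUnmap(fd, buf);
    }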
-int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
-	      unsigned memType)
-{
-    struct drm_mm_init_arg arg;
-
-    memset(&arg, 0, sizeof(arg));
-
-    arg.magic = DRM_BO_INIT_MAGIC;
-    arg.major = DRM_BO_INIT_MAJOR;
-    arg.minor = DRM_BO_INIT_MINOR;
-    arg.p_offset = pOffset;
-    arg.p_size = pSize;
-    arg.mem_type = memType;
-
-    if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
-	return -errno;
-    return 0;	
-}
-
-int drmMMTakedown(int fd, unsigned memType)
-{
-    struct drm_mm_type_arg arg;
-
-    memset(&arg, 0, sizeof(arg));
-    arg.mem_type = memType;
-
-    if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
-	return -errno;
-    return 0;	
-}
-
-/*
- * If this function returns an error, and lockBM was set to 1,
- * the buffer manager is NOT locked.
- */
-
-int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict)
-{
-    struct drm_mm_type_arg arg;
-
-    memset(&arg, 0, sizeof(arg));
-    arg.mem_type = memType;
-    arg.lock_flags |= (lockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;
-    arg.lock_flags |= (ignoreNoEvict) ? DRM_BO_LOCK_IGNORE_NO_EVICT : 0;
-
-    return drmIoctlTimeout(fd, DRM_IOCTL_MM_LOCK, &arg);
-}
-
-int drmMMUnlock(int fd, unsigned memType, int unlockBM)
-{
-    struct drm_mm_type_arg arg;
-
-    memset(&arg, 0, sizeof(arg));
-    
-    arg.mem_type = memType;
-    arg.lock_flags |= (unlockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;
-
-    return drmIoctlTimeout(fd, DRM_IOCTL_MM_UNLOCK, &arg);
-}
-
-int drmMMInfo(int fd, unsigned memType, uint64_t *size)
-{
-    struct drm_mm_info_arg arg;
-
-    memset(&arg, 0, sizeof(arg));
-    
-    arg.mem_type = memType;
-
-    if (ioctl(fd, DRM_IOCTL_MM_INFO, &arg))
-	return -errno;
-
-    *size = arg.p_size;
-    return 0;
-}
-
-int drmBOVersion(int fd, unsigned int *major,
-		 unsigned int *minor,
-		 unsigned int *patchlevel)
-{
-    struct drm_bo_version_arg arg;
-    int ret;
-
-    memset(&arg, 0, sizeof(arg));
-    ret = ioctl(fd, DRM_IOCTL_BO_VERSION, &arg);
-    if (ret)
-	return -errno;
-
-    if (major)
-	*major = arg.major;
-    if (minor)
-	*minor = arg.minor;
-    if (patchlevel)
-	*patchlevel = arg.patchlevel;
-
-    return 0;
-}
-
-
 
 #define DRM_MAX_FDS 16
 static struct {
     char *BusID;
diff --git a/libdrm/xf86drm.h b/libdrm/xf86drm.h
index 35780aca..584d2a41 100644
--- a/libdrm/xf86drm.h
+++ b/libdrm/xf86drm.h
@@ -659,10 +659,9 @@ extern int  drmSLLookupNeighbors(void *l, unsigned long key,
 extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
 extern void drmCloseOnce(int fd);
+extern void drmMsg(const char *format, ...);
 
 extern int drmSetMaster(int fd);
 extern int drmDropMaster(int fd);
 
-#include "xf86mm.h"
-
 #endif
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index bb573407..a31de424 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -94,6 +94,18 @@ typedef struct _drmMMListHead
 #define DRMLISTENTRY(__type, __item, __field)   \
     ((__type *)(((char *) (__item)) - offsetof(__type, __field)))
 
+#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
+
+#define DRMLISTFOREACHSAFE(__item, __temp, __list)			\
+	for ((__item) = (__list)->next, (__temp) = (__item)->next;	\
+	     (__item) != (__list);					\
+	     (__item) = (__temp), (__temp) = (__item)->next)
+
+#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list)		\
+	for ((__item) = (__list)->prev, (__temp) = (__item)->prev;	\
+	     (__item) != (__list);					\
+	     (__item) = (__temp), (__temp) = (__item)->prev)
+
 typedef struct _drmFence
 {
     unsigned handle;
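The new DRMLISTFOREACHSAFE macros latch the successor in __temp before the loop body runs, so the current entry may be unlinked and freed mid-walk. A usage sketch, assuming the usual DRMLISTDEL() unlink macro defined alongside these and a hypothetical my_entry type:

    #include <stdlib.h>

    typedef struct my_entry {
        drmMMListHead head;    /* embedded list node */
        int payload;
    } my_entry;

    static void
    free_all_entries(drmMMListHead *list)
    {
        drmMMListHead *entry, *temp;

        /* Safe against deletion: 'temp' already holds the next node
         * when the current one is unlinked and freed. */
        DRMLISTFOREACHSAFE(entry, temp, list) {
            my_entry *e = DRMLISTENTRY(my_entry, entry, head);
            DRMLISTDEL(entry);
            free(e);
        }
    }

Afterwards DRMLISTEMPTY(list) holds, since every node has been unlinked.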
