| author | Dave Airlie <airlied@redhat.com> | 2008-07-31 12:54:48 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2008-07-31 12:54:48 +1000 |
| commit | 9b8d71b5eb09857b07409731d3de182751f712a2 (patch) | |
| tree | 452efcf85dffd27d292f480b536b20f52332f389 /shared-core | |
| parent | fb5542aaa87aca9b6b312968abe0a6044812cf0e (diff) | |
TTM: remove API and userspace objects.
This removes all the TTM userspace API and all userspace objects.
It also removes the drm_bo_lock.c code.
Diffstat (limited to 'shared-core')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | shared-core/drm.h | 343 |
| -rw-r--r-- | shared-core/i915_dma.c | 32 |
| -rw-r--r-- | shared-core/i915_drm.h | 52 |
3 files changed, 2 insertions, 425 deletions
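For context, the interface being deleted was driven entirely through ioctls on the DRM file descriptor, using the structures and `DRM_IOCTL_*` numbers visible in the removal hunks below. A minimal, hypothetical sketch of how a client might have created a buffer object through this API — assuming a pre-removal drm.h that still declares `struct drm_bo_create_arg` and `DRM_IOCTL_BO_CREATE`, and an already-open, authenticated DRM fd — could look like this (the helper name is invented for illustration):

```c
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>   /* assumption: the pre-removal header that still carries the TTM API */

/* Hypothetical helper: create a buffer object via the now-removed TTM ioctl. */
static int old_ttm_bo_create(int fd, uint64_t size, unsigned int *handle)
{
	struct drm_bo_create_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.d.req.size = size;
	/* Access and placement flags are or-ed together, as described in drm.h. */
	arg.d.req.flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
			  DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
			  DRM_BO_FLAG_MAPPABLE;
	arg.d.req.page_alignment = 0;	/* left at zero in this sketch */

	if (ioctl(fd, DRM_IOCTL_BO_CREATE, &arg) != 0)
		return -1;

	/* The kernel answers through the reply half of the union. */
	*handle = arg.d.rep.handle;
	return 0;
}
```

After this commit none of these ioctls exist; as the i915 hunks below show, the TTM execbuffer entry disappears from the ioctl table while the GEM entries (`DRM_I915_GEM_*`) remain.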
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 7aba2939..76d2c4c5 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -675,324 +675,6 @@ struct drm_set_version {
 	int drm_dd_minor;
 };
 
-
-#define DRM_FENCE_FLAG_EMIT 0x00000001
-#define DRM_FENCE_FLAG_SHAREABLE 0x00000002
-/**
- * On hardware with no interrupt events for operation completion,
- * indicates that the kernel should sleep while waiting for any blocking
- * operation to complete rather than spinning.
- *
- * Has no effect otherwise.
- */
-#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004
-#define DRM_FENCE_FLAG_NO_USER 0x00000010
-
-/* Reserved for driver use */
-#define DRM_FENCE_MASK_DRIVER 0xFF000000
-
-#define DRM_FENCE_TYPE_EXE 0x00000001
-
-struct drm_fence_arg {
-	unsigned int handle;
-	unsigned int fence_class;
-	unsigned int type;
-	unsigned int flags;
-	unsigned int signaled;
-	unsigned int error;
-	unsigned int sequence;
-	unsigned int pad64;
-	uint64_t expand_pad[2]; /*Future expansion */
-};
-
-/* Buffer permissions, referring to how the GPU uses the buffers.
- * these translate to fence types used for the buffers.
- * Typically a texture buffer is read, A destination buffer is write and
- * a command (batch-) buffer is exe. Can be or-ed together.
- */
-
-#define DRM_BO_FLAG_READ (1ULL << 0)
-#define DRM_BO_FLAG_WRITE (1ULL << 1)
-#define DRM_BO_FLAG_EXE (1ULL << 2)
-
-/*
- * All of the bits related to access mode
- */
-#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
-/*
- * Status flags. Can be read to determine the actual state of a buffer.
- * Can also be set in the buffer mask before validation.
- */
-
-/*
- * Mask: Never evict this buffer. Not even with force. This type of buffer is only
- * available to root and must be manually removed before buffer manager shutdown
- * or lock.
- * Flags: Acknowledge
- */
-#define DRM_BO_FLAG_NO_EVICT (1ULL << 4)
-
-/*
- * Mask: Require that the buffer is placed in mappable memory when validated.
- * If not set the buffer may or may not be in mappable memory when validated.
- * Flags: If set, the buffer is in mappable memory.
- */
-#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
-
-/* Mask: The buffer should be shareable with other processes.
- * Flags: The buffer is shareable with other processes.
- */
-#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
-
-/* Mask: If set, place the buffer in cache-coherent memory if available.
- * If clear, never place the buffer in cache coherent memory if validated.
- * Flags: The buffer is currently in cache-coherent memory.
- */
-#define DRM_BO_FLAG_CACHED (1ULL << 7)
-
-/* Mask: Make sure that every time this buffer is validated,
- * it ends up on the same location provided that the memory mask is the same.
- * The buffer will also not be evicted when claiming space for
- * other buffers. Basically a pinned buffer but it may be thrown out as
- * part of buffer manager shutdown or locking.
- * Flags: Acknowledge.
- */
-#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
-
-/* Mask: Make sure the buffer is in cached memory when mapped. In conjunction
- * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
- * with unsnooped PTEs instead of snooped, by using chipset-specific cache
- * flushing at bind time. A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
- * as the eviction to local memory (TTM unbind) on map is just a side effect
- * to prevent aggressive cache prefetch from the GPU disturbing the cache
- * management that the DRM is doing.
- *
- * Flags: Acknowledge.
- * Buffers allocated with this flag should not be used for suballocators
- * This type may have issues on CPUs with over-aggressive caching
- * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
- */
-#define DRM_BO_FLAG_CACHED_MAPPED (1ULL << 19)
-
-
-/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
- * Flags: Acknowledge.
- */
-#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
-
-/*
- * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
- * Flags: Acknowledge.
- */
-#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
-#define DRM_BO_FLAG_TILE (1ULL << 15)
-
-/*
- * Memory type flags that can be or'ed together in the mask, but only
- * one appears in flags.
- */
-
-/* System memory */
-#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
-/* Translation table memory */
-#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
-/* Vram memory */
-#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
-/* Up to the driver to define. */
-#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
-#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
-#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
-#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
-#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
-/* We can add more of these now with a 64-bit flag type */
-
-/*
- * This is a mask covering all of the memory type flags; easier to just
- * use a single constant than a bunch of | values. It covers
- * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
- */
-#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
-/*
- * This adds all of the CPU-mapping options in with the memory
- * type to label all bits which change how the page gets mapped
- */
-#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
-			     DRM_BO_FLAG_CACHED_MAPPED | \
-			     DRM_BO_FLAG_CACHED | \
-			     DRM_BO_FLAG_MAPPABLE)
-
-/* Driver-private flags */
-#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
-
-/*
- * Don't block on validate and map. Instead, return EBUSY.
- */
-#define DRM_BO_HINT_DONT_BLOCK 0x00000002
-/*
- * Don't place this buffer on the unfenced list. This means
- * that the buffer will not end up having a fence associated
- * with it as a result of this operation
- */
-#define DRM_BO_HINT_DONT_FENCE 0x00000004
-/**
- * On hardware with no interrupt events for operation completion,
- * indicates that the kernel should sleep while waiting for any blocking
- * operation to complete rather than spinning.
- *
- * Has no effect otherwise.
- */
-#define DRM_BO_HINT_WAIT_LAZY 0x00000008
-/*
- * The client has compute relocations refering to this buffer using the
- * offset in the presumed_offset field. If that offset ends up matching
- * where this buffer lands, the kernel is free to skip executing those
- * relocations
- */
-#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
-
-#define DRM_BO_INIT_MAGIC 0xfe769812
-#define DRM_BO_INIT_MAJOR 1
-#define DRM_BO_INIT_MINOR 0
-#define DRM_BO_INIT_PATCH 0
-
-
-struct drm_bo_info_req {
-	uint64_t mask;
-	uint64_t flags;
-	unsigned int handle;
-	unsigned int hint;
-	unsigned int fence_class;
-	unsigned int desired_tile_stride;
-	unsigned int tile_info;
-	unsigned int pad64;
-	uint64_t presumed_offset;
-};
-
-struct drm_bo_create_req {
-	uint64_t flags;
-	uint64_t size;
-	uint64_t buffer_start;
-	unsigned int hint;
-	unsigned int page_alignment;
-};
-
-
-/*
- * Reply flags
- */
-
-#define DRM_BO_REP_BUSY 0x00000001
-
-struct drm_bo_info_rep {
-	uint64_t flags;
-	uint64_t proposed_flags;
-	uint64_t size;
-	uint64_t offset;
-	uint64_t arg_handle;
-	uint64_t buffer_start;
-	unsigned int handle;
-	unsigned int fence_flags;
-	unsigned int rep_flags;
-	unsigned int page_alignment;
-	unsigned int desired_tile_stride;
-	unsigned int hw_tile_stride;
-	unsigned int tile_info;
-	unsigned int pad64;
-	uint64_t expand_pad[4]; /*Future expansion */
-};
-
-struct drm_bo_arg_rep {
-	struct drm_bo_info_rep bo_info;
-	int ret;
-	unsigned int pad64;
-};
-
-struct drm_bo_create_arg {
-	union {
-		struct drm_bo_create_req req;
-		struct drm_bo_info_rep rep;
-	} d;
-};
-
-struct drm_bo_handle_arg {
-	unsigned int handle;
-};
-
-struct drm_bo_reference_info_arg {
-	union {
-		struct drm_bo_handle_arg req;
-		struct drm_bo_info_rep rep;
-	} d;
-};
-
-struct drm_bo_map_wait_idle_arg {
-	union {
-		struct drm_bo_info_req req;
-		struct drm_bo_info_rep rep;
-	} d;
-};
-
-struct drm_bo_op_req {
-	enum {
-		drm_bo_validate,
-		drm_bo_fence,
-		drm_bo_ref_fence,
-	} op;
-	unsigned int arg_handle;
-	struct drm_bo_info_req bo_req;
-};
-
-
-struct drm_bo_op_arg {
-	uint64_t next;
-	union {
-		struct drm_bo_op_req req;
-		struct drm_bo_arg_rep rep;
-	} d;
-	int handled;
-	unsigned int pad64;
-};
-
-
-#define DRM_BO_MEM_LOCAL 0
-#define DRM_BO_MEM_TT 1
-#define DRM_BO_MEM_VRAM 2
-#define DRM_BO_MEM_PRIV0 3
-#define DRM_BO_MEM_PRIV1 4
-#define DRM_BO_MEM_PRIV2 5
-#define DRM_BO_MEM_PRIV3 6
-#define DRM_BO_MEM_PRIV4 7
-
-#define DRM_BO_MEM_TYPES 8 /* For now. */
-
-#define DRM_BO_LOCK_UNLOCK_BM (1 << 0)
-#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)
-
-struct drm_bo_version_arg {
-	uint32_t major;
-	uint32_t minor;
-	uint32_t patchlevel;
-};
-
-struct drm_mm_type_arg {
-	unsigned int mem_type;
-	unsigned int lock_flags;
-};
-
-struct drm_mm_init_arg {
-	unsigned int magic;
-	unsigned int major;
-	unsigned int minor;
-	unsigned int mem_type;
-	uint64_t p_offset;
-	uint64_t p_size;
-};
-
-struct drm_mm_info_arg {
-	unsigned int mem_type;
-	uint64_t p_size;
-};
-
 struct drm_gem_close {
 	/** Handle of the object to be closed. */
 	uint32_t handle;
@@ -1337,31 +1019,6 @@ struct drm_mode_crtc_lut {
 
 #define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
 
-#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
-#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
-#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
-#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
-
-#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
-#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
-
-#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
-#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
-#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
-#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
-#define DRM_IOCTL_BO_SETSTATUS DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
-#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
-#define DRM_IOCTL_BO_VERSION DRM_IOR(0xd6, struct drm_bo_version_arg)
-#define DRM_IOCTL_MM_INFO DRM_IOWR(0xd7, struct drm_mm_info_arg)
-
 #define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
 #define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
 #define DRM_IOCTL_MODE_GETCONNECTOR DRM_IOWR(0xA2, struct drm_mode_get_connector)
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 09c53676..3e6a6626 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -148,7 +148,7 @@ int i915_dma_cleanup(struct drm_device * dev)
 	return 0;
 }
 
-#if defined(I915_HAVE_BUFFER) && defined(DRI2)
+#if defined(DRI2)
 #define DRI2_SAREA_BLOCK_TYPE(b) ((b) >> 16)
 #define DRI2_SAREA_BLOCK_SIZE(b) ((b) & 0xffff)
 #define DRI2_SAREA_BLOCK_NEXT(p) \
@@ -226,12 +226,7 @@ static int i915_initialize(struct drm_device * dev,
 		}
 	}
 
-#ifdef I915_HAVE_BUFFER
-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
-	}
-#endif
-
+
 	if (init->ring_size != 0) {
 		dev_priv->ring.Size = init->ring_size;
 		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
@@ -285,10 +280,6 @@ static int i915_initialize(struct drm_device * dev,
 	}
 	DRM_DEBUG("Enabled hardware status page\n");
 
-#ifdef I915_HAVE_BUFFER
-	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_init(&dev_priv->cmdbuf_mutex);
-	}
 #ifdef DRI2
 	if (init->func == I915_INIT_DMA2) {
 		int ret = setup_dri2_sarea(dev, file_priv, init);
@@ -299,7 +290,6 @@ static int i915_initialize(struct drm_device * dev,
 		}
 	}
 #endif /* DRI2 */
-#endif /* I915_HAVE_BUFFER */
 
 	return 0;
 }
@@ -565,9 +555,6 @@ int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 				   struct drm_i915_cmdbuffer * cmd)
 {
-#ifdef I915_HAVE_FENCE
-	struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
 	int nbox = cmd->num_cliprects;
 	int i = 0, count, ret;
 
@@ -594,10 +581,6 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 	}
 
 	i915_emit_breadcrumb(dev);
-#ifdef I915_HAVE_FENCE
-	if (unlikely((dev_priv->counter & 0xFF) == 0))
-		drm_fence_flush_old(dev, 0, dev_priv->counter);
-#endif
 
 	return 0;
 }
@@ -648,10 +631,6 @@ int i915_dispatch_batchbuffer(struct drm_device * dev,
 	}
 
 	i915_emit_breadcrumb(dev);
-#ifdef I915_HAVE_FENCE
-	if (unlikely((dev_priv->counter & 0xFF) == 0))
-		drm_fence_flush_old(dev, 0, dev_priv->counter);
-#endif
 
 	return 0;
 }
@@ -724,10 +703,6 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
 		i915_do_dispatch_flip(dev, i, sync);
 
 	i915_emit_breadcrumb(dev);
-#ifdef I915_HAVE_FENCE
-	if (unlikely(!sync && ((dev_priv->counter & 0xFF) == 0)))
-		drm_fence_flush_old(dev, 0, dev_priv->counter);
-#endif
 }
 
 int i915_quiescent(struct drm_device *dev)
@@ -1077,9 +1052,6 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
-#ifdef I915_HAVE_BUFFER
-	DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
-#endif
 	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 8ba71687..611f943a 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -375,58 +375,6 @@ typedef struct drm_i915_hws_addr {
 	uint64_t addr;
 } drm_i915_hws_addr_t;
 
-/*
- * Relocation header is 4 uint32_ts
- * 0 - 32 bit reloc count
- * 1 - 32-bit relocation type
- * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
- */
-#define I915_RELOC_HEADER 4
-
-/*
- * type 0 relocation has 4-uint32_t stride
- * 0 - offset into buffer
- * 1 - delta to add in
- * 2 - buffer handle
- * 3 - reserved (for optimisations later).
- */
-/*
- * type 1 relocation has 4-uint32_t stride.
- * Hangs off the first item in the op list.
- * Performed after all valiations are done.
- * Try to group relocs into the same relocatee together for
- * performance reasons.
- * 0 - offset into buffer
- * 1 - delta to add in
- * 2 - buffer index in op list.
- * 3 - relocatee index in op list.
- */
-#define I915_RELOC_TYPE_0 0
-#define I915_RELOC0_STRIDE 4
-#define I915_RELOC_TYPE_1 1
-#define I915_RELOC1_STRIDE 4
-
-
-struct drm_i915_op_arg {
-	uint64_t next;
-	uint64_t reloc_ptr;
-	int handled;
-	unsigned int pad64;
-	union {
-		struct drm_bo_op_req req;
-		struct drm_bo_arg_rep rep;
-	} d;
-
-};
-
-struct drm_i915_execbuffer {
-	uint64_t ops_list;
-	uint32_t num_buffers;
-	struct drm_i915_batchbuffer batch;
-	drm_context_t context; /* for lockless use in the future */
-	struct drm_fence_arg fence_arg;
-};
-
 struct drm_i915_gem_init {
 	/**
 	 * Beginning offset in the GTT to be managed by the DRM memory
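Synchronization against the removed fence objects likewise went through ioctls taking `struct drm_fence_arg` (see the `DRM_IOCTL_FENCE_*` removals above). A hypothetical sketch, under the same assumptions as the earlier example (a pre-removal drm.h, an open DRM fd, and an existing fence handle; the helper name is invented):

```c
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>   /* assumption: the pre-removal header */

/* Hypothetical helper: block until a fence signals, via the removed ioctl. */
static int old_ttm_fence_wait(int fd, unsigned int handle)
{
	struct drm_fence_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.type = DRM_FENCE_TYPE_EXE;
	/* DRM_FENCE_FLAG_WAIT_LAZY: sleep rather than spin on hardware
	 * without completion interrupts (see the flag's comment above). */
	arg.flags = DRM_FENCE_FLAG_WAIT_LAZY;

	return ioctl(fd, DRM_IOCTL_FENCE_WAIT, &arg);
}
```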