Diffstat (limited to 'shared-core')
-rw-r--r-- | shared-core/drm.h          | 239
-rw-r--r-- | shared-core/drm_drawable.c | 330
-rw-r--r-- | shared-core/drm_pciids.txt | 177
-rw-r--r-- | shared-core/i915_dma.c     |  43
-rw-r--r-- | shared-core/i915_drm.h     |  29
-rw-r--r-- | shared-core/i915_drv.h     |  77
-rw-r--r-- | shared-core/i915_irq.c     | 333
-rw-r--r-- | shared-core/mach64_dma.c   | 234
-rw-r--r-- | shared-core/mach64_drm.h   |   4
-rw-r--r-- | shared-core/mach64_drv.h   |   7
-rw-r--r-- | shared-core/mach64_state.c | 205
-rw-r--r-- | shared-core/r300_cmdbuf.c  |  33
-rw-r--r-- | shared-core/radeon_cp.c    |  46
-rw-r--r-- | shared-core/radeon_drv.h   |  25
-rw-r--r-- | shared-core/radeon_state.c | 167
-rw-r--r-- | shared-core/savage_bci.c   |   1
-rw-r--r-- | shared-core/sis_drv.h      |  22
-rw-r--r-- | shared-core/via_dma.c      |  18
-rw-r--r-- | shared-core/via_drm.h      |  42
-rw-r--r-- | shared-core/via_drv.c      |   1
-rw-r--r-- | shared-core/via_drv.h      |  16
-rw-r--r-- | shared-core/via_irq.c      |  25
-rw-r--r-- | shared-core/via_verifier.h |   8
23 files changed, 1595 insertions, 487 deletions
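
Among other things, the drm.h and i915_drm.h changes below add the DRM_IOCTL_UPDATE_DRAW and DRM_IOCTL_I915_VBLANK_SWAP ioctls used to hand drawable cliprects to the kernel and schedule a back-to-front blit on a given vertical blank. The short sketch that follows is not part of the commit; it only illustrates how a client might drive the two new ioctls, assuming an already-authenticated DRM file descriptor, the updated headers from this diff, and no error handling.

/* Illustrative sketch only -- not part of the diff below.
 * Assumes an authenticated DRM fd and the drm.h / i915_drm.h
 * definitions added in this commit (include paths may differ
 * depending on how libdrm installs the headers).
 */
#include <sys/ioctl.h>
#include "drm.h"
#include "i915_drm.h"

static void schedule_swap(int fd, drm_drawable_t draw,
                          drm_clip_rect_t *rects, unsigned int num_rects,
                          unsigned int target_vblank)
{
        /* Tell the kernel which cliprects currently belong to the drawable. */
        drm_update_draw_t update = {
                .handle = draw,
                .type   = DRM_DRAWABLE_CLIPRECTS,
                .num    = num_rects,
                .data   = (unsigned long long)(unsigned long)rects,
        };

        /* Queue a swap for the drawable at the requested vblank sequence. */
        drm_i915_vblank_swap_t swap = {
                .drawable = draw,
                .seqtype  = _DRM_VBLANK_ABSOLUTE,  /* or _DRM_VBLANK_RELATIVE */
                .sequence = target_vblank,
        };

        ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
        ioctl(fd, DRM_IOCTL_I915_VBLANK_SWAP, &swap);
}

The kernel side of this flow is the drm_update_drawable_info() and i915_vblank_swap() handlers added below; the scheduled blits are emitted from i915_vblank_tasklet() when the vblank interrupt for the chosen pipe fires.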
diff --git a/shared-core/drm.h b/shared-core/drm.h index 87f8da6b..9efb1dc4 100644 --- a/shared-core/drm.h +++ b/shared-core/drm.h @@ -69,9 +69,6 @@ #endif #if defined(__linux__) -#if defined(__KERNEL__) -#include <linux/config.h> -#endif #include <asm/ioctl.h> /* For _IO* macros */ #define DRM_IOCTL_NR(n) _IOC_NR(n) #define DRM_IOC_VOID _IOC_NONE @@ -134,8 +131,16 @@ #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) #if defined(__linux__) +#if defined(__KERNEL__) +typedef __u64 drm_u64_t; +#else +typedef unsigned long long drm_u64_t; +#endif + typedef unsigned int drm_handle_t; #else +#include <sys/types.h> +typedef u_int64_t drm_u64_t; typedef unsigned long drm_handle_t; /**< To mapped regions */ #endif typedef unsigned int drm_context_t; /**< GLXContext handle */ @@ -159,6 +164,14 @@ typedef struct drm_clip_rect { } drm_clip_rect_t; /** + * Drawable information. + */ +typedef struct drm_drawable_info { + unsigned int num_rects; + drm_clip_rect_t *rects; +} drm_drawable_info_t; + +/** * Texture region, */ typedef struct drm_tex_region { @@ -259,7 +272,8 @@ typedef enum drm_map_type { _DRM_SHM = 2, /**< shared, cached */ _DRM_AGP = 3, /**< AGP/GART */ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ - _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */ + _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ + _DRM_TTM = 6 } drm_map_type_t; /** @@ -408,7 +422,8 @@ typedef struct drm_buf_desc { _DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */ _DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */ _DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */ - _DRM_FB_BUFFER = 0x08 /**< Buffer is in frame buffer */ + _DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */ + _DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */ } flags; unsigned long agp_start; /**< * Start address of where the AGP buffers are @@ -508,6 +523,20 @@ typedef struct drm_draw { } drm_draw_t; /** + * DRM_IOCTL_UPDATE_DRAW ioctl argument type. + */ +typedef enum { + DRM_DRAWABLE_CLIPRECTS, +} drm_drawable_info_type_t; + +typedef struct drm_update_draw { + drm_drawable_t handle; + unsigned int type; + unsigned int num; + unsigned long long data; +} drm_update_draw_t; + +/** * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type. 
*/ typedef struct drm_auth { @@ -529,10 +558,14 @@ typedef struct drm_irq_busid { typedef enum { _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ + _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ + _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ } drm_vblank_seq_type_t; -#define _DRM_VBLANK_FLAGS_MASK _DRM_VBLANK_SIGNAL +#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) +#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \ + _DRM_VBLANK_NEXTONMISS) struct drm_wait_vblank_request { drm_vblank_seq_type_t type; @@ -629,6 +662,190 @@ typedef struct drm_set_version { int drm_dd_minor; } drm_set_version_t; + +#define DRM_FENCE_FLAG_EMIT 0x00000001 +#define DRM_FENCE_FLAG_SHAREABLE 0x00000002 +#define DRM_FENCE_FLAG_WAIT_LAZY 0x00000004 +#define DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS 0x00000008 + +/* Reserved for driver use */ +#define DRM_FENCE_MASK_DRIVER 0xFF000000 + +#define DRM_FENCE_TYPE_EXE 0x00000001 + +typedef struct drm_fence_arg { + unsigned handle; + int class; + unsigned type; + unsigned flags; + unsigned signaled; + unsigned expand_pad[4]; /*Future expansion */ + enum { + drm_fence_create, + drm_fence_destroy, + drm_fence_reference, + drm_fence_unreference, + drm_fence_signaled, + drm_fence_flush, + drm_fence_wait, + drm_fence_emit, + drm_fence_buffers + } op; +} drm_fence_arg_t; + +/* Buffer permissions, referring to how the GPU uses the buffers. + these translate to fence types used for the buffers. + Typically a texture buffer is read, A destination buffer is write and + a command (batch-) buffer is exe. Can be or-ed together. */ + +#define DRM_BO_FLAG_READ 0x00000001 +#define DRM_BO_FLAG_WRITE 0x00000002 +#define DRM_BO_FLAG_EXE 0x00000004 + +/* + * Status flags. Can be read to determine the actual state of a buffer. + */ + +/* + * Cannot evict this buffer. Not even with force. This type of buffer should + * only be available for root, and must be manually removed before buffer + * manager shutdown or swapout. + */ +#define DRM_BO_FLAG_NO_EVICT 0x00000010 +/* Always keep a system memory shadow to a vram buffer */ +#define DRM_BO_FLAG_SHADOW_VRAM 0x00000020 +/* The buffer is shareable with other processes */ +#define DRM_BO_FLAG_SHAREABLE 0x00000040 +/* The buffer is currently cached */ +#define DRM_BO_FLAG_CACHED 0x00000080 +/* Make sure that every time this buffer is validated, it ends up on the same + * location. The buffer will also not be evicted when claiming space for + * other buffers. Basically a pinned buffer but it may be thrown out as + * part of buffer manager shutdown or swapout. Not supported yet.*/ +#define DRM_BO_FLAG_NO_MOVE 0x00000100 + +/* Make sure the buffer is in cached memory when mapped for reading */ +#define DRM_BO_FLAG_READ_CACHED 0x00080000 +/* When there is a choice between VRAM and TT, prefer VRAM. + The default behaviour is to prefer TT. */ +#define DRM_BO_FLAG_PREFER_VRAM 0x00040000 +/* Bind this buffer cached if the hardware supports it. 
*/ +#define DRM_BO_FLAG_BIND_CACHED 0x0002000 + +/* System Memory */ +#define DRM_BO_FLAG_MEM_LOCAL 0x01000000 +/* Translation table memory */ +#define DRM_BO_FLAG_MEM_TT 0x02000000 +/* Vram memory */ +#define DRM_BO_FLAG_MEM_VRAM 0x04000000 +/* Unmappable Vram memory */ +#define DRM_BO_FLAG_MEM_VRAM_NM 0x08000000 +/* Memory flag mask */ +#define DRM_BO_MASK_MEM 0xFF000000 + +/* When creating a buffer, Avoid system storage even if allowed */ +#define DRM_BO_HINT_AVOID_LOCAL 0x00000001 +/* Don't block on validate and map */ +#define DRM_BO_HINT_DONT_BLOCK 0x00000002 +/* Don't place this buffer on the unfenced list.*/ +#define DRM_BO_HINT_DONT_FENCE 0x00000004 +#define DRM_BO_HINT_WAIT_LAZY 0x00000008 +#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010 + + +/* Driver specific flags. Could be for example rendering engine */ +#define DRM_BO_MASK_DRIVER 0x00F00000 + +typedef enum { + drm_bo_type_dc, + drm_bo_type_user, + drm_bo_type_fake +}drm_bo_type_t; + + +typedef struct drm_bo_arg_request { + unsigned handle; /* User space handle */ + unsigned mask; + unsigned hint; + drm_u64_t size; + drm_bo_type_t type; + unsigned arg_handle; + drm_u64_t buffer_start; + unsigned page_alignment; + unsigned expand_pad[4]; /*Future expansion */ + enum { + drm_bo_create, + drm_bo_validate, + drm_bo_map, + drm_bo_unmap, + drm_bo_fence, + drm_bo_destroy, + drm_bo_reference, + drm_bo_unreference, + drm_bo_info, + drm_bo_wait_idle, + drm_bo_ref_fence + } op; +} drm_bo_arg_request_t; + + +/* + * Reply flags + */ + +#define DRM_BO_REP_BUSY 0x00000001 + +typedef struct drm_bo_arg_reply { + int ret; + unsigned handle; + unsigned flags; + drm_u64_t size; + drm_u64_t offset; + drm_u64_t arg_handle; + unsigned mask; + drm_u64_t buffer_start; + unsigned fence_flags; + unsigned rep_flags; + unsigned page_alignment; + unsigned expand_pad[4]; /*Future expansion */ +}drm_bo_arg_reply_t; + + +typedef struct drm_bo_arg{ + int handled; + drm_u64_t next; + union { + drm_bo_arg_request_t req; + drm_bo_arg_reply_t rep; + } d; +} drm_bo_arg_t; + +#define DRM_BO_MEM_LOCAL 0 +#define DRM_BO_MEM_TT 1 +#define DRM_BO_MEM_VRAM 2 +#define DRM_BO_MEM_VRAM_NM 3 +#define DRM_BO_MEM_TYPES 2 /* For now. */ + +typedef union drm_mm_init_arg{ + struct { + enum { + mm_init, + mm_takedown, + mm_query, + mm_lock, + mm_unlock + } op; + drm_u64_t p_offset; + drm_u64_t p_size; + unsigned mem_type; + unsigned expand_pad[8]; /*Future expansion */ + } req; + struct { + drm_handle_t mm_sarea; + unsigned expand_pad[8]; /*Future expansion */ + } rep; +} drm_mm_init_arg_t; + /** * \name Ioctls Definitions */ @@ -694,15 +911,23 @@ typedef struct drm_set_version { #define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t) +#define DRM_IOCTL_FENCE DRM_IOWR(0x3b, drm_fence_arg_t) +#define DRM_IOCTL_BUFOBJ DRM_IOWR(0x3d, drm_bo_arg_t) +#define DRM_IOCTL_MM_INIT DRM_IOWR(0x3e, drm_mm_init_arg_t) + +#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t) + /*@}*/ /** * Device specific ioctls should only be in their respective headers - * The device specific ioctl range is from 0x40 to 0x79. + * The device specific ioctl range is from 0x40 to 0x99. + * Generic IOCTLS restart at 0xA0. * * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and * drmCommandReadWrite(). 
*/ #define DRM_COMMAND_BASE 0x40 +#define DRM_COMMAND_END 0xA0 #endif diff --git a/shared-core/drm_drawable.c b/shared-core/drm_drawable.c new file mode 100644 index 00000000..0817e321 --- /dev/null +++ b/shared-core/drm_drawable.c @@ -0,0 +1,330 @@ +/** + * \file drm_drawable.c + * IOCTLs for drawables + * + * \author Rickard E. (Rik) Faith <faith@valinux.com> + * \author Gareth Hughes <gareth@valinux.com> + * \author Michel Dänzer <michel@tungstengraphics.com> + */ + +/* + * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" + +/** + * Allocate drawable ID and memory to store information about it. 
+ */ +int drm_adddraw(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + unsigned long irqflags; + int i, j; + u32 *bitfield = dev->drw_bitfield; + unsigned int bitfield_length = dev->drw_bitfield_length; + drm_drawable_info_t **info = dev->drw_info; + unsigned int info_length = dev->drw_info_length; + drm_draw_t draw; + + for (i = 0, j = 0; i < bitfield_length; i++) { + if (bitfield[i] == ~0) + continue; + + for (; j < 8 * sizeof(*bitfield); j++) + if (!(bitfield[i] & (1 << j))) + goto done; + } +done: + + if (i == bitfield_length) { + bitfield_length++; + + bitfield = drm_alloc(bitfield_length * sizeof(*bitfield), + DRM_MEM_BUFS); + + if (!bitfield) { + DRM_ERROR("Failed to allocate new drawable bitfield\n"); + return DRM_ERR(ENOMEM); + } + + if (8 * sizeof(*bitfield) * bitfield_length > info_length) { + info_length += 8 * sizeof(*bitfield); + + info = drm_alloc(info_length * sizeof(*info), + DRM_MEM_BUFS); + + if (!info) { + DRM_ERROR("Failed to allocate new drawable info" + " array\n"); + + drm_free(bitfield, + bitfield_length * sizeof(*bitfield), + DRM_MEM_BUFS); + return DRM_ERR(ENOMEM); + } + } + + bitfield[i] = 0; + } + + draw.handle = i * 8 * sizeof(*bitfield) + j + 1; + DRM_DEBUG("%d\n", draw.handle); + + spin_lock_irqsave(&dev->drw_lock, irqflags); + + bitfield[i] |= 1 << j; + info[draw.handle - 1] = NULL; + + if (bitfield != dev->drw_bitfield) { + memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length * + sizeof(*bitfield)); + drm_free(dev->drw_bitfield, sizeof(*bitfield) * + dev->drw_bitfield_length, DRM_MEM_BUFS); + dev->drw_bitfield = bitfield; + dev->drw_bitfield_length = bitfield_length; + } + + if (info != dev->drw_info) { + memcpy(info, dev->drw_info, dev->drw_info_length * + sizeof(*info)); + drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length, + DRM_MEM_BUFS); + dev->drw_info = info; + dev->drw_info_length = info_length; + } + + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + + DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw)); + + return 0; +} + +/** + * Free drawable ID and memory to store information about it. + */ +int drm_rmdraw(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_draw_t draw; + int id, idx; + unsigned int shift; + unsigned long irqflags; + u32 *bitfield = dev->drw_bitfield; + unsigned int bitfield_length = dev->drw_bitfield_length; + drm_drawable_info_t **info = dev->drw_info; + unsigned int info_length = dev->drw_info_length; + + DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, + sizeof(draw)); + + id = draw.handle - 1; + idx = id / (8 * sizeof(*bitfield)); + shift = id % (8 * sizeof(*bitfield)); + + if (idx < 0 || idx >= bitfield_length || + !(bitfield[idx] & (1 << shift))) { + DRM_DEBUG("No such drawable %d\n", draw.handle); + return 0; + } + + spin_lock_irqsave(&dev->drw_lock, irqflags); + + bitfield[idx] &= ~(1 << shift); + + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + + if (info[id]) { + drm_free(info[id]->rects, info[id]->num_rects * + sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + drm_free(info[id], sizeof(**info), DRM_MEM_BUFS); + } + + /* Can we shrink the arrays? 
*/ + if (idx == bitfield_length - 1) { + while (idx >= 0 && !bitfield[idx]) + --idx; + + bitfield_length = idx + 1; + + if (idx != id / (8 * sizeof(*bitfield))) + bitfield = drm_alloc(bitfield_length * + sizeof(*bitfield), DRM_MEM_BUFS); + + if (!bitfield && bitfield_length) { + bitfield = dev->drw_bitfield; + bitfield_length = dev->drw_bitfield_length; + } + } + + if (bitfield != dev->drw_bitfield) { + info_length = 8 * sizeof(*bitfield) * bitfield_length; + + info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS); + + if (!info && info_length) { + info = dev->drw_info; + info_length = dev->drw_info_length; + } + + spin_lock_irqsave(&dev->drw_lock, irqflags); + + memcpy(bitfield, dev->drw_bitfield, bitfield_length * + sizeof(*bitfield)); + drm_free(dev->drw_bitfield, sizeof(*bitfield) * + dev->drw_bitfield_length, DRM_MEM_BUFS); + dev->drw_bitfield = bitfield; + dev->drw_bitfield_length = bitfield_length; + + if (info != dev->drw_info) { + memcpy(info, dev->drw_info, info_length * + sizeof(*info)); + drm_free(dev->drw_info, sizeof(*info) * + dev->drw_info_length, DRM_MEM_BUFS); + dev->drw_info = info; + dev->drw_info_length = info_length; + } + + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + } + + DRM_DEBUG("%d\n", draw.handle); + return 0; +} + +int drm_update_drawable_info(DRM_IOCTL_ARGS) { + DRM_DEVICE; + drm_update_draw_t update; + unsigned int id, idx, shift, bitfield_length = dev->drw_bitfield_length; + u32 *bitfield = dev->drw_bitfield; + unsigned long irqflags; + drm_drawable_info_t *info; + drm_clip_rect_t *rects; + int err; + + DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, + sizeof(update)); + + id = update.handle - 1; + idx = id / (8 * sizeof(*bitfield)); + shift = id % (8 * sizeof(*bitfield)); + + if (idx < 0 || idx >= bitfield_length || + !(bitfield[idx] & (1 << shift))) { + DRM_ERROR("No such drawable %d\n", update.handle); + return DRM_ERR(EINVAL); + } + + info = dev->drw_info[id]; + + if (!info) { + info = drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS); + + if (!info) { + DRM_ERROR("Failed to allocate drawable info memory\n"); + return DRM_ERR(ENOMEM); + } + } + + switch (update.type) { + case DRM_DRAWABLE_CLIPRECTS: + if (update.num != info->num_rects) { + rects = drm_alloc(update.num * sizeof(drm_clip_rect_t), + DRM_MEM_BUFS); + } else + rects = info->rects; + + if (update.num && !rects) { + DRM_ERROR("Failed to allocate cliprect memory\n"); + err = DRM_ERR(ENOMEM); + goto error; + } + + if (update.num && DRM_COPY_FROM_USER(rects, + (drm_clip_rect_t __user *) + (unsigned long)update.data, + update.num * + sizeof(*rects))) { + DRM_ERROR("Failed to copy cliprects from userspace\n"); + err = DRM_ERR(EFAULT); + goto error; + } + + spin_lock_irqsave(&dev->drw_lock, irqflags); + + if (rects != info->rects) { + drm_free(info->rects, info->num_rects * + sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + } + + info->rects = rects; + info->num_rects = update.num; + dev->drw_info[id] = info; + + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + + DRM_DEBUG("Updated %d cliprects for drawable %d\n", + info->num_rects, id); + break; + default: + DRM_ERROR("Invalid update type %d\n", update.type); + return DRM_ERR(EINVAL); + } + + return 0; + +error: + if (!dev->drw_info[id]) + drm_free(info, sizeof(*info), DRM_MEM_BUFS); + else if (rects != dev->drw_info[id]->rects) + drm_free(rects, update.num * + sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + + return err; +} + +/** + * Caller must hold the drawable spinlock! 
+ */ +drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { + u32 *bitfield = dev->drw_bitfield; + unsigned int idx, shift; + + id--; + idx = id / (8 * sizeof(*bitfield)); + shift = id % (8 * sizeof(*bitfield)); + + if (idx < 0 || idx >= dev->drw_bitfield_length || + !(bitfield[idx] & (1 << shift))) { + DRM_DEBUG("No such drawable %d\n", id); + return NULL; + } + + return dev->drw_info[id]; +} +EXPORT_SYMBOL(drm_get_drawable_info); diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 1cb73950..7c185a30 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -1,11 +1,11 @@ [radeon] -0x1002 0x3150 CHIP_RV380|CHIP_IS_MOBILITY "ATI Radeon Mobility X600 M24" -0x1002 0x3152 CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon Mobility X300 M24" -0x1002 0x3154 CHIP_RV380|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI FireGL M24 GL" -0x1002 0x3E50 CHIP_RV380|CHIP_NEW_MEMMAP "ATI Radeon RV380 X600" -0x1002 0x3E54 CHIP_RV380|CHIP_NEW_MEMMAP "ATI FireGL V3200 RV380" -0x1002 0x4136 CHIP_RS100|CHIP_IS_IGP "ATI Radeon RS100 IGP 320" -0x1002 0x4137 CHIP_RS200|CHIP_IS_IGP "ATI Radeon RS200 IGP 340" +0x1002 0x3150 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 M24" +0x1002 0x3152 CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X300 M24" +0x1002 0x3154 CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI FireGL M24 GL" +0x1002 0x3E50 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV380 X600" +0x1002 0x3E54 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireGL V3200 RV380" +0x1002 0x4136 CHIP_RS100|RADEON_IS_IGP "ATI Radeon RS100 IGP 320" +0x1002 0x4137 CHIP_RS200|RADEON_IS_IGP "ATI Radeon RS200 IGP 340" 0x1002 0x4144 CHIP_R300 "ATI Radeon AD 9500" 0x1002 0x4145 CHIP_R300 "ATI Radeon AE 9700 Pro" 0x1002 0x4146 CHIP_R300 "ATI Radeon AF R300 9600TX" @@ -21,35 +21,35 @@ 0x1002 0x4154 CHIP_RV350 "ATI FireGL AT T2" 0x1002 0x4155 CHIP_RV350 "ATI Radeon 9650" 0x1002 0x4156 CHIP_RV350 "ATI FireGL AV RV360 T2" -0x1002 0x4237 CHIP_RS200|CHIP_IS_IGP "ATI Radeon RS250 IGP" +0x1002 0x4237 CHIP_RS200|RADEON_IS_IGP "ATI Radeon RS250 IGP" 0x1002 0x4242 CHIP_R200 "ATI Radeon BB R200 AIW 8500DV" 0x1002 0x4243 CHIP_R200 "ATI Radeon BC R200" -0x1002 0x4336 CHIP_RS100|CHIP_IS_IGP|CHIP_IS_MOBILITY "ATI Radeon RS100 Mobility U1" -0x1002 0x4337 CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY "ATI Radeon RS200 Mobility IGP 340M" -0x1002 0x4437 CHIP_RS200|CHIP_IS_IGP|CHIP_IS_MOBILITY "ATI Radeon RS250 Mobility IGP" +0x1002 0x4336 CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS100 Mobility U1" +0x1002 0x4337 CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS200 Mobility IGP 340M" +0x1002 0x4437 CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS250 Mobility IGP" 0x1002 0x4966 CHIP_RV250 "ATI Radeon If RV250 9000" 0x1002 0x4967 CHIP_RV250 "ATI Radeon Ig RV250 9000" -0x1002 0x4A48 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JH R420 X800" -0x1002 0x4A49 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JI R420 X800 Pro" -0x1002 0x4A4A CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JJ R420 X800 SE" -0x1002 0x4A4B CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JK R420 X800 XT" -0x1002 0x4A4C CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JL R420 X800" -0x1002 0x4A4D CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL JM X3-256" -0x1002 0x4A4E CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon JN R420 Mobility M18" -0x1002 0x4A4F CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JO R420 X800 SE" -0x1002 0x4A50 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon JP R420 X800 XT PE" -0x1002 0x4A54 CHIP_R420|CHIP_NEW_MEMMAP "ATI 
Radeon JT R420 AIW X800 VE" -0x1002 0x4B49 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R481 X850 XT" -0x1002 0x4B4A CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R481 X850 SE" -0x1002 0x4B4B CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R481 X850 Pro" -0x1002 0x4B4C CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R481 X850 XT PE" -0x1002 0x4C57 CHIP_RV200|CHIP_IS_MOBILITY "ATI Radeon LW RV200 Mobility 7500 M7" -0x1002 0x4C58 CHIP_RV200|CHIP_IS_MOBILITY "ATI Radeon LX RV200 Mobility FireGL 7800 M7" -0x1002 0x4C59 CHIP_RV100|CHIP_IS_MOBILITY "ATI Radeon LY RV100 Mobility M6" -0x1002 0x4C5A CHIP_RV100|CHIP_IS_MOBILITY "ATI Radeon LZ RV100 Mobility M6" -0x1002 0x4C64 CHIP_RV250|CHIP_IS_MOBILITY "ATI Radeon Ld RV250 Mobility 9000 M9" +0x1002 0x4A48 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JH R420 X800" +0x1002 0x4A49 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JI R420 X800 Pro" +0x1002 0x4A4A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JJ R420 X800 SE" +0x1002 0x4A4B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JK R420 X800 XT" +0x1002 0x4A4C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JL R420 X800" +0x1002 0x4A4D CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL JM X3-256" +0x1002 0x4A4E CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon JN R420 Mobility M18" +0x1002 0x4A4F CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JO R420 X800 SE" +0x1002 0x4A50 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JP R420 X800 XT PE" +0x1002 0x4A54 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon JT R420 AIW X800 VE" +0x1002 0x4B49 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 XT" +0x1002 0x4B4A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 SE" +0x1002 0x4B4B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 Pro" +0x1002 0x4B4C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R481 X850 XT PE" +0x1002 0x4C57 CHIP_RV200|RADEON_IS_MOBILITY "ATI Radeon LW RV200 Mobility 7500 M7" +0x1002 0x4C58 CHIP_RV200|RADEON_IS_MOBILITY "ATI Radeon LX RV200 Mobility FireGL 7800 M7" +0x1002 0x4C59 CHIP_RV100|RADEON_IS_MOBILITY "ATI Radeon LY RV100 Mobility M6" +0x1002 0x4C5A CHIP_RV100|RADEON_IS_MOBILITY "ATI Radeon LZ RV100 Mobility M6" +0x1002 0x4C64 CHIP_RV250|RADEON_IS_MOBILITY "ATI Radeon Ld RV250 Mobility 9000 M9" 0x1002 0x4C66 CHIP_RV250 "ATI Radeon Lf RV250 Mobility 9000 M9 / FireMV 2400 PCI" -0x1002 0x4C67 CHIP_RV250|CHIP_IS_MOBILITY "ATI Radeon Lg RV250 Mobility 9000 M9" +0x1002 0x4C67 CHIP_RV250|RADEON_IS_MOBILITY "ATI Radeon Lg RV250 Mobility 9000 M9" 0x1002 0x4E44 CHIP_R300 "ATI Radeon ND R300 9700 Pro" 0x1002 0x4E45 CHIP_R300 "ATI Radeon NE R300 9500 Pro / 9700" 0x1002 0x4E46 CHIP_R300 "ATI Radeon NF R300 9600TX" @@ -58,16 +58,16 @@ 0x1002 0x4E49 CHIP_R350 "ATI Radeon NI R350 9800" 0x1002 0x4E4A CHIP_R350 "ATI Radeon NJ R360 9800 XT" 0x1002 0x4E4B CHIP_R350 "ATI FireGL NK X2" -0x1002 0x4E50 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NP" -0x1002 0x4E51 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NQ" -0x1002 0x4E52 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M11 NR" -0x1002 0x4E53 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NS" -0x1002 0x4E54 CHIP_RV350|CHIP_IS_MOBILITY "ATI FireGL T2/T2e" -0x1002 0x4E56 CHIP_RV350|CHIP_IS_MOBILITY "ATI Radeon Mobility 9550" -0x1002 0x5144 CHIP_R100|CHIP_SINGLE_CRTC "ATI Radeon QD R100" -0x1002 0x5145 CHIP_R100|CHIP_SINGLE_CRTC "ATI Radeon QE R100" -0x1002 0x5146 CHIP_R100|CHIP_SINGLE_CRTC "ATI Radeon QF R100" -0x1002 0x5147 CHIP_R100|CHIP_SINGLE_CRTC "ATI Radeon QG R100" +0x1002 0x4E50 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NP" +0x1002 0x4E51 
CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NQ" +0x1002 0x4E52 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M11 NR" +0x1002 0x4E53 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon RV350 Mobility 9600 M10 NS" +0x1002 0x4E54 CHIP_RV350|RADEON_IS_MOBILITY "ATI FireGL T2/T2e" +0x1002 0x4E56 CHIP_RV350|RADEON_IS_MOBILITY "ATI Radeon Mobility 9550" +0x1002 0x5144 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QD R100" +0x1002 0x5145 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QE R100" +0x1002 0x5146 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QF R100" +0x1002 0x5147 CHIP_R100|RADEON_SINGLE_CRTC "ATI Radeon QG R100" 0x1002 0x5148 CHIP_R200 "ATI Radeon QH R200 8500" 0x1002 0x514C CHIP_R200 "ATI Radeon QL R200 8500 LE" 0x1002 0x514D CHIP_R200 "ATI Radeon QM R200 9100" @@ -76,59 +76,59 @@ 0x1002 0x5159 CHIP_RV100 "ATI Radeon QY RV100 7000/VE" 0x1002 0x515A CHIP_RV100 "ATI Radeon QZ RV100 7000/VE" 0x1002 0x515E CHIP_RV100 "ATI ES1000 RN50" -0x1002 0x5460 CHIP_RV380|CHIP_IS_MOBILITY "ATI Radeon Mobility X300 M22" -0x1002 0x5462 CHIP_RV380|CHIP_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C" -0x1002 0x5464 CHIP_RV380|CHIP_IS_MOBILITY "ATI FireGL M22 GL 5464" -0x1002 0x5548 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800" -0x1002 0x5549 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800 Pro" -0x1002 0x554A CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800 XT PE" -0x1002 0x554B CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800 SE" -0x1002 0x554C CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R430 X800 XTP" -0x1002 0x554D CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R430 X800 XL" -0x1002 0x554E CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R430 X800 SE" -0x1002 0x554F CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R430 X800" -0x1002 0x5550 CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL V7100 R423" -0x1002 0x5551 CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL V5100 R423 UQ" -0x1002 0x5552 CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL unknown R423 UR" -0x1002 0x5554 CHIP_R420|CHIP_NEW_MEMMAP "ATI FireGL unknown R423 UT" -0x1002 0x564A CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility FireGL V5000 M26" -0x1002 0x564B CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility FireGL V5000 M26" -0x1002 0x564F CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon Mobility X700 XL M26" -0x1002 0x5652 CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon Mobility X700 M26" -0x1002 0x5653 CHIP_RV410|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon Mobility X700 M26" -0x1002 0x5834 CHIP_RS300|CHIP_IS_IGP "ATI Radeon RS300 9100 IGP" -0x1002 0x5835 CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY "ATI Radeon RS300 Mobility IGP" +0x1002 0x5460 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X300 M22" +0x1002 0x5462 CHIP_RV380|RADEON_IS_MOBILITY "ATI Radeon Mobility X600 SE M24C" +0x1002 0x5464 CHIP_RV380|RADEON_IS_MOBILITY "ATI FireGL M22 GL 5464" +0x1002 0x5548 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800" +0x1002 0x5549 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 Pro" +0x1002 0x554A CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT PE" +0x1002 0x554B CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 SE" +0x1002 0x554C CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XTP" +0x1002 0x554D CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 XL" +0x1002 0x554E CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800 SE" +0x1002 0x554F CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R430 X800" +0x1002 0x5550 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL V7100 R423" +0x1002 0x5551 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL V5100 R423 UQ" +0x1002 0x5552 
CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UR" +0x1002 0x5554 CHIP_R420|RADEON_NEW_MEMMAP "ATI FireGL unknown R423 UT" +0x1002 0x564A CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5000 M26" +0x1002 0x564B CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5000 M26" +0x1002 0x564F CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 XL M26" +0x1002 0x5652 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26" +0x1002 0x5653 CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon Mobility X700 M26" +0x1002 0x5834 CHIP_RS300|RADEON_IS_IGP "ATI Radeon RS300 9100 IGP" +0x1002 0x5835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY "ATI Radeon RS300 Mobility IGP" 0x1002 0x5960 CHIP_RV280 "ATI Radeon RV280 9250" 0x1002 0x5961 CHIP_RV280 "ATI Radeon RV280 9200" 0x1002 0x5962 CHIP_RV280 "ATI Radeon RV280 9200" 0x1002 0x5964 CHIP_RV280 "ATI Radeon RV280 9200 SE" 0x1002 0x5965 CHIP_RV280 "ATI FireMV 2200 PCI" 0x1002 0x5969 CHIP_RV100 "ATI ES1000 RN50" -0x1002 0x5b60 CHIP_RV380|CHIP_NEW_MEMMAP "ATI Radeon RV370 X300 SE" -0x1002 0x5b62 CHIP_RV380|CHIP_NEW_MEMMAP "ATI Radeon RV370 X600 Pro" -0x1002 0x5b63 CHIP_RV380|CHIP_NEW_MEMMAP "ATI Radeon RV370 X550" -0x1002 0x5b64 CHIP_RV380|CHIP_NEW_MEMMAP "ATI FireGL V3100 (RV370) 5B64" -0x1002 0x5b65 CHIP_RV380|CHIP_NEW_MEMMAP "ATI FireMV 2200 PCIE (RV370) 5B65" -0x1002 0x5c61 CHIP_RV280|CHIP_IS_MOBILITY "ATI Radeon RV280 Mobility" -0x1002 0x5c63 CHIP_RV280|CHIP_IS_MOBILITY "ATI Radeon RV280 Mobility" -0x1002 0x5d48 CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility Radeon X800 XT M28" -0x1002 0x5d49 CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility FireGL V5100 M28" -0x1002 0x5d4a CHIP_R420|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Mobility Radeon X800 M28" -0x1002 0x5d4c CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850" -0x1002 0x5d4d CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850 XT PE" -0x1002 0x5d4e CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850 SE" -0x1002 0x5d4f CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850 Pro" -0x1002 0x5d50 CHIP_R420|CHIP_NEW_MEMMAP "ATI unknown Radeon / FireGL R480" -0x1002 0x5d52 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R480 X850 XT" -0x1002 0x5d57 CHIP_R420|CHIP_NEW_MEMMAP "ATI Radeon R423 X800 XT" -0x1002 0x5e48 CHIP_RV410|CHIP_NEW_MEMMAP "ATI FireGL V5000 RV410" -0x1002 0x5e4a CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700 XT" -0x1002 0x5e4b CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700 Pro" -0x1002 0x5e4c CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700 SE" -0x1002 0x5e4d CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700" -0x1002 0x5e4f CHIP_RV410|CHIP_NEW_MEMMAP "ATI Radeon RV410 X700 SE" -0x1002 0x7834 CHIP_RS300|CHIP_IS_IGP|CHIP_NEW_MEMMAP "ATI Radeon RS350 9000/9100 IGP" -0x1002 0x7835 CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY|CHIP_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP" +0x1002 0x5b60 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X300 SE" +0x1002 0x5b62 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X600 Pro" +0x1002 0x5b63 CHIP_RV380|RADEON_NEW_MEMMAP "ATI Radeon RV370 X550" +0x1002 0x5b64 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireGL V3100 (RV370) 5B64" +0x1002 0x5b65 CHIP_RV380|RADEON_NEW_MEMMAP "ATI FireMV 2200 PCIE (RV370) 5B65" +0x1002 0x5c61 CHIP_RV280|RADEON_IS_MOBILITY "ATI Radeon RV280 Mobility" +0x1002 0x5c63 CHIP_RV280|RADEON_IS_MOBILITY "ATI Radeon RV280 Mobility" +0x1002 0x5d48 CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 XT M28" +0x1002 0x5d49 
CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5100 M28" +0x1002 0x5d4a CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X800 M28" +0x1002 0x5d4c CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850" +0x1002 0x5d4d CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT PE" +0x1002 0x5d4e CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 SE" +0x1002 0x5d4f CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 Pro" +0x1002 0x5d50 CHIP_R420|RADEON_NEW_MEMMAP "ATI unknown Radeon / FireGL R480" +0x1002 0x5d52 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R480 X850 XT" +0x1002 0x5d57 CHIP_R420|RADEON_NEW_MEMMAP "ATI Radeon R423 X800 XT" +0x1002 0x5e48 CHIP_RV410|RADEON_NEW_MEMMAP "ATI FireGL V5000 RV410" +0x1002 0x5e4a CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 XT" +0x1002 0x5e4b CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 Pro" +0x1002 0x5e4c CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE" +0x1002 0x5e4d CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700" +0x1002 0x5e4f CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE" +0x1002 0x7834 CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP "ATI Radeon RS350 9000/9100 IGP" +0x1002 0x7835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP" [r128] 0x1002 0x4c45 0 "ATI Rage 128 Mobility LE (PCI)" @@ -186,6 +186,7 @@ 0x1002 0x4c51 0 "3D Rage LT Pro" 0x1002 0x4c42 0 "3D Rage LT Pro AGP-133" 0x1002 0x4c44 0 "3D Rage LT Pro AGP-66" +0x1002 0x4759 0 "Rage 3D IICATI 3D RAGE IIC AGP(A12/A13) 0x1002 0x474c 0 "Rage XC" 0x1002 0x474f 0 "Rage XL" 0x1002 0x4752 0 "Rage XL" diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 1a89514b..63b2a3ea 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -1,4 +1,4 @@ -/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- +/ i915_dma.c -- DMA support for the I915 -*- linux-c -*- */ /* * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. @@ -37,7 +37,6 @@ dev->pdev->device == 0x29A2 || \ dev->pdev->device == 0x2A02) - /* Really want an OS-independent resettable timer. 
Would like to have * this loop run for (eg) 3 sec, but have the timer reset every time * the head pointer changes, so that EBUSY only happens if the ring @@ -164,6 +163,7 @@ static int i915_initialize(drm_device_t * dev, dev_priv->ring.virtual_start = dev_priv->ring.map.handle; + dev_priv->cpp = init->cpp; dev_priv->back_offset = init->back_offset; dev_priv->front_offset = init->front_offset; dev_priv->current_page = 0; @@ -196,9 +196,10 @@ static int i915_initialize(drm_device_t * dev, I915_WRITE(0x02080, dev_priv->dma_status_page); DRM_DEBUG("Enabled hardware status page\n"); - dev->dev_private = (void *)dev_priv; - +#ifdef I915_HAVE_BUFFER + drm_bo_driver_init(dev); +#endif return 0; } @@ -435,17 +436,39 @@ static void i915_emit_breadcrumb(drm_device_t *dev) dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; - if (dev_priv->counter > 0x7FFFFFFFUL) - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; - BEGIN_LP_RING(4); OUT_RING(CMD_STORE_DWORD_IDX); OUT_RING(20); OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); +#ifdef I915_HAVE_FENCE + drm_fence_flush_old(dev, dev_priv->counter); +#endif +} + + +int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + uint32_t flush_cmd = CMD_MI_FLUSH; + RING_LOCALS; + + flush_cmd |= flush; + + i915_kernel_lost_context(dev); + + BEGIN_LP_RING(4); + OUT_RING(flush_cmd); + OUT_RING(0); + OUT_RING(0); + OUT_RING(0); + ADVANCE_LP_RING(); + + return 0; } + static int i915_dispatch_cmdbuffer(drm_device_t * dev, drm_i915_cmdbuffer_t * cmd) { @@ -566,7 +589,9 @@ static int i915_dispatch_flip(drm_device_t * dev) OUT_RING(dev_priv->counter); OUT_RING(0); ADVANCE_LP_RING(); - +#ifdef I915_HAVE_FENCE + drm_fence_flush_old(dev, dev_priv->counter); +#endif dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; return 0; } @@ -680,6 +705,7 @@ static int i915_flip_bufs(DRM_IOCTL_ARGS) return i915_dispatch_flip(dev); } + static int i915_getparam(DRM_IOCTL_ARGS) { DRM_DEVICE; @@ -798,6 +824,7 @@ drm_ioctl_desc_t i915_ioctls[] = { [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, [DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY }, [DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH }, + [DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH}, }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index fcffb25c..9eec109e 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -104,8 +104,27 @@ typedef struct _drm_i915_sarea { unsigned int depth_tiled; unsigned int rotated_tiled; unsigned int rotated2_tiled; + + int pipeA_x; + int pipeA_y; + int pipeA_w; + int pipeA_h; + int pipeB_x; + int pipeB_y; + int pipeB_w; + int pipeB_h; } drm_i915_sarea_t; +/* Driver specific fence types and classes. 
+ */ + +/* The only fence class we support */ +#define DRM_I915_FENCE_CLASS_ACCEL 0 +/* Fence type that guarantees read-write flush */ +#define DRM_I915_FENCE_TYPE_RW 2 +/* MI_FLUSH programmed just before the fence */ +#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000 + /* Flags for perf_boxes */ #define I915_BOX_RING_EMPTY 0x1 @@ -132,6 +151,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_DESTROY_HEAP 0x0c #define DRM_I915_SET_VBLANK_PIPE 0x0d #define DRM_I915_GET_VBLANK_PIPE 0x0e +#define DRM_I915_VBLANK_SWAP 0x0f #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -148,6 +168,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t) #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) +#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) /* Allow drivers to submit batchbuffers directly to hardware, relying @@ -244,4 +265,12 @@ typedef struct drm_i915_vblank_pipe { int pipe; } drm_i915_vblank_pipe_t; +/* Schedule buffer swap at given vertical blank: + */ +typedef struct drm_i915_vblank_swap { + drm_drawable_t drawable; + drm_vblank_seq_type_t seqtype; + unsigned int sequence; +} drm_i915_vblank_swap_t; + #endif /* _I915_DRM_H_ */ diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index a87075b1..85804ce7 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -35,9 +35,9 @@ #define DRIVER_AUTHOR "Tungsten Graphics, Inc." 
-#define DRIVER_NAME "i915" +#define DRIVER_NAME "i915-mm" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20060119" +#define DRIVER_DATE "20060929" /* Interface history: * @@ -46,11 +46,18 @@ * 1.3: Add vblank support * 1.4: Fix cmdbuffer path, add heap destroy * 1.5: Add vblank pipe configuration + * 1.6: - New ioctl for scheduling buffer swaps on vertical blank + * - Support vertical blank on secondary display pipe */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 5 +#define DRIVER_MINOR 7 #define DRIVER_PATCHLEVEL 0 +#if defined(__linux__) +#define I915_HAVE_FENCE +#define I915_HAVE_BUFFER +#endif + typedef struct _drm_i915_ring_buffer { int tail_mask; unsigned long Start; @@ -71,6 +78,13 @@ struct mem_block { DRMFILE filp; /* 0: free, -1: heap, other: real files */ }; +typedef struct _drm_i915_vbl_swap { + struct list_head head; + drm_drawable_t drw_id; + unsigned int pipe; + unsigned int sequence; +} drm_i915_vbl_swap_t; + typedef struct drm_i915_private { drm_local_map_t *sarea; drm_local_map_t *mmio_map; @@ -81,8 +95,9 @@ typedef struct drm_i915_private { drm_dma_handle_t *status_page_dmah; void *hw_status_page; dma_addr_t dma_status_page; - unsigned long counter; + uint32_t counter; + unsigned int cpp; int back_offset; int front_offset; int current_page; @@ -98,6 +113,22 @@ typedef struct drm_i915_private { struct mem_block *agp_heap; unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; int vblank_pipe; + spinlock_t user_irq_lock; + int user_irq_refcount; + int fence_irq_on; + uint32_t irq_enable_reg; + int irq_enabled; + +#ifdef I915_HAVE_FENCE + uint32_t flush_sequence; + uint32_t flush_flags; + uint32_t flush_pending; + uint32_t saved_flush_status; +#endif + + spinlock_t swaps_lock; + drm_i915_vbl_swap_t vbl_swaps; + unsigned int swaps_pending; } drm_i915_private_t; extern drm_ioctl_desc_t i915_ioctls[]; @@ -111,18 +142,25 @@ extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp); extern int i915_driver_device_is_agp(drm_device_t * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +extern int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush); + /* i915_irq.c */ extern int i915_irq_emit(DRM_IOCTL_ARGS); extern int i915_irq_wait(DRM_IOCTL_ARGS); extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence); +extern int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence); extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); extern void i915_driver_irq_preinstall(drm_device_t * dev); extern void i915_driver_irq_postinstall(drm_device_t * dev); extern void i915_driver_irq_uninstall(drm_device_t * dev); extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS); extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS); +extern int i915_emit_irq(drm_device_t * dev); +extern void i915_user_irq_on(drm_i915_private_t *dev_priv); +extern void i915_user_irq_off(drm_i915_private_t *dev_priv); +extern int i915_vblank_swap(DRM_IOCTL_ARGS); /* i915_mem.c */ extern int i915_mem_alloc(DRM_IOCTL_ARGS); @@ -132,6 +170,23 @@ extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS); extern void i915_mem_takedown(struct mem_block **heap); extern void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap); +#ifdef I915_HAVE_FENCE +/* i915_fence.c */ + + +extern void i915_fence_handler(drm_device_t *dev); +extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t flags, + uint32_t *sequence, + uint32_t *native_type); +extern void i915_poke_flush(drm_device_t *dev); +#endif + +#ifdef I915_HAVE_BUFFER +/* 
i915_buffer.c */ +extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev); +extern int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type); +extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags); +#endif #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) @@ -182,6 +237,11 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); #define INST_OP_FLUSH 0x02000000 #define INST_FLUSH_MAP_CACHE 0x00000001 +#define CMD_MI_FLUSH (0x04 << 23) +#define MI_NO_WRITE_FLUSH (1 << 2) +#define MI_READ_FLUSH (1 << 0) +#define MI_EXE_FLUSH (1 << 1) + #define BB1_START_ADDR_MASK (~0x7) #define BB1_PROTECTED (1<<0) #define BB1_UNPROTECTED (0<<0) @@ -191,6 +251,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); #define I915REG_INT_IDENTITY_R 0x020a4 #define I915REG_INT_MASK_R 0x020a8 #define I915REG_INT_ENABLE_R 0x020a0 +#define I915REG_INSTPM 0x020c0 #define SRX_INDEX 0x3c4 #define SRX_DATA 0x3c5 @@ -256,6 +317,10 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) +#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) +#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) +#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) + #define MI_BATCH_BUFFER ((0x30<<23)|1) #define MI_BATCH_BUFFER_START (0x31<<23) #define MI_BATCH_BUFFER_END (0xA<<23) @@ -272,6 +337,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) -#define READ_BREADCRUMB(dev_priv) (((u32*)(dev_priv->hw_status_page))[5]) - +#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5]) +#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) #endif diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 14213b58..a48e1ff8 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -37,6 +37,99 @@ #define MAX_NOPID ((u32)~0) +/** + * Emit blits for scheduled buffer swaps. + * + * This function will be called with the HW lock held. + */ +static void i915_vblank_tasklet(drm_device_t *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + unsigned long irqflags; + struct list_head *list, *tmp; + + DRM_DEBUG("\n"); + + spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + + list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { + drm_i915_vbl_swap_t *vbl_swap = + list_entry(list, drm_i915_vbl_swap_t, head); + atomic_t *counter = vbl_swap->pipe ? &dev->vbl_received2 : + &dev->vbl_received; + + if ((atomic_read(counter) - vbl_swap->sequence) <= (1<<23)) { + drm_drawable_info_t *drw; + + spin_unlock(&dev_priv->swaps_lock); + + spin_lock(&dev->drw_lock); + + drw = drm_get_drawable_info(dev, vbl_swap->drw_id); + + if (drw) { + int i, num_rects = drw->num_rects; + drm_clip_rect_t *rect = drw->rects; + drm_i915_sarea_t *sarea_priv = + dev_priv->sarea_priv; + u32 cpp = dev_priv->cpp; + u32 cmd = (cpp == 4) ? 
(XY_SRC_COPY_BLT_CMD | + XY_SRC_COPY_BLT_WRITE_ALPHA | + XY_SRC_COPY_BLT_WRITE_RGB) + : XY_SRC_COPY_BLT_CMD; + u32 pitchropcpp = (sarea_priv->pitch * cpp) | + (0xcc << 16) | (cpp << 23) | + (1 << 24); + RING_LOCALS; + + i915_kernel_lost_context(dev); + + BEGIN_LP_RING(6); + + OUT_RING(GFX_OP_DRAWRECT_INFO); + OUT_RING(0); + OUT_RING(0); + OUT_RING(sarea_priv->width | + sarea_priv->height << 16); + OUT_RING(sarea_priv->width | + sarea_priv->height << 16); + OUT_RING(0); + + ADVANCE_LP_RING(); + + sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; + + for (i = 0; i < num_rects; i++, rect++) { + BEGIN_LP_RING(8); + + OUT_RING(cmd); + OUT_RING(pitchropcpp); + OUT_RING((rect->y1 << 16) | rect->x1); + OUT_RING((rect->y2 << 16) | rect->x2); + OUT_RING(sarea_priv->front_offset); + OUT_RING((rect->y1 << 16) | rect->x1); + OUT_RING(pitchropcpp & 0xffff); + OUT_RING(sarea_priv->back_offset); + + ADVANCE_LP_RING(); + } + } + + spin_unlock(&dev->drw_lock); + + spin_lock(&dev_priv->swaps_lock); + + list_del(list); + + drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); + + dev_priv->swaps_pending--; + } + } + + spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); +} + irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { drm_device_t *dev = (drm_device_t *) arg; @@ -45,10 +138,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) temp = I915_READ16(I915REG_INT_IDENTITY_R); - temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG); + temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG); +#if 0 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); - +#endif if (temp == 0) return IRQ_NONE; @@ -56,19 +150,40 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); - if (temp & USER_INT_FLAG) + if (temp & USER_INT_FLAG) { DRM_WAKEUP(&dev_priv->irq_queue); +#ifdef I915_HAVE_FENCE + i915_fence_handler(dev); +#endif + } if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) { - atomic_inc(&dev->vbl_received); + int vblank_pipe = dev_priv->vblank_pipe; + + if ((vblank_pipe & + (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) + == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) { + if (temp & VSYNC_PIPEA_FLAG) + atomic_inc(&dev->vbl_received); + if (temp & VSYNC_PIPEB_FLAG) + atomic_inc(&dev->vbl_received2); + } else if (((temp & VSYNC_PIPEA_FLAG) && + (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) || + ((temp & VSYNC_PIPEB_FLAG) && + (vblank_pipe & DRM_I915_VBLANK_PIPE_B))) + atomic_inc(&dev->vbl_received); + DRM_WAKEUP(&dev->vbl_queue); drm_vbl_send_signals(dev); + + if (dev_priv->swaps_pending > 0) + drm_locked_tasklet(dev, i915_vblank_tasklet); } return IRQ_HANDLED; } -static int i915_emit_irq(drm_device_t * dev) +int i915_emit_irq(drm_device_t * dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -98,6 +213,28 @@ static int i915_emit_irq(drm_device_t * dev) } +void i915_user_irq_on(drm_i915_private_t *dev_priv) +{ + spin_lock(&dev_priv->user_irq_lock); + if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){ + dev_priv->irq_enable_reg |= USER_INT_FLAG; + I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); + } + spin_unlock(&dev_priv->user_irq_lock); + +} + +void i915_user_irq_off(drm_i915_private_t *dev_priv) +{ + spin_lock(&dev_priv->user_irq_lock); + if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { + // dev_priv->irq_enable_reg &= ~USER_INT_FLAG; + // I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); + } + spin_unlock(&dev_priv->user_irq_lock); +} + + static int i915_wait_irq(drm_device_t * 
dev, int irq_nr) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -110,9 +247,11 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr) return 0; dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - + + i915_user_irq_on(dev_priv); DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); + i915_user_irq_off(dev_priv); if (ret == DRM_ERR(EBUSY)) { DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n", @@ -124,7 +263,8 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr) return ret; } -int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) +static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence, + atomic_t *counter) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned int cur_vblank; @@ -136,7 +276,7 @@ int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) } DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, - (((cur_vblank = atomic_read(&dev->vbl_received)) + (((cur_vblank = atomic_read(counter)) - *sequence) <= (1<<23))); *sequence = cur_vblank; @@ -144,6 +284,16 @@ int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) return ret; } +int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence) +{ + return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received); +} + +int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence) +{ + return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2); +} + /* Needs the lock as it touches the ring. */ int i915_irq_emit(DRM_IOCTL_ARGS) @@ -192,23 +342,18 @@ int i915_irq_wait(DRM_IOCTL_ARGS) return i915_wait_irq(dev, irqwait.irq_seq); } -static int i915_enable_interrupt (drm_device_t *dev) +static void i915_enable_interrupt (drm_device_t *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u16 flag; - flag = 0; + dev_priv->irq_enable_reg = USER_INT_FLAG; if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A) - flag |= VSYNC_PIPEA_FLAG; + dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG; if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B) - flag |= VSYNC_PIPEB_FLAG; - if (dev_priv->vblank_pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { - DRM_ERROR("%s called with invalid pipe 0x%x\n", - __FUNCTION__, dev_priv->vblank_pipe); - return DRM_ERR(EINVAL); - } - I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag); - return 0; + dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG; + + I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg); + dev_priv->irq_enabled = 1; } /* Set the vblank monitor pipe @@ -227,8 +372,17 @@ int i915_vblank_pipe_set(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data, sizeof(pipe)); + if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { + DRM_ERROR("%s called with invalid pipe 0x%x\n", + __FUNCTION__, pipe.pipe); + return DRM_ERR(EINVAL); + } + dev_priv->vblank_pipe = pipe.pipe; - return i915_enable_interrupt (dev); + + i915_enable_interrupt (dev); + + return 0; } int i915_vblank_pipe_get(DRM_IOCTL_ARGS) @@ -254,13 +408,125 @@ int i915_vblank_pipe_get(DRM_IOCTL_ARGS) return 0; } +/** + * Schedule buffer swap at given vertical blank. 
+ */ +int i915_vblank_swap(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_vblank_swap_t swap; + drm_i915_vbl_swap_t *vbl_swap; + unsigned int pipe, seqtype, curseq; + unsigned long irqflags; + struct list_head *list; + + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __func__); + return DRM_ERR(EINVAL); + } + + if (dev_priv->sarea_priv->rotation) { + DRM_DEBUG("Rotation not supported\n"); + return DRM_ERR(EINVAL); + } + + DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data, + sizeof(swap)); + + if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE | + _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) { + DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype); + return DRM_ERR(EINVAL); + } + + pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; + + seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); + + if (!(dev_priv->vblank_pipe & (1 << pipe))) { + DRM_ERROR("Invalid pipe %d\n", pipe); + return DRM_ERR(EINVAL); + } + + spin_lock_irqsave(&dev->drw_lock, irqflags); + + if (!drm_get_drawable_info(dev, swap.drawable)) { + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + DRM_ERROR("Invalid drawable ID %d\n", swap.drawable); + return DRM_ERR(EINVAL); + } + + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + + curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received); + + if (seqtype == _DRM_VBLANK_RELATIVE) + swap.sequence += curseq; + + if ((curseq - swap.sequence) <= (1<<23)) { + if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) { + swap.sequence = curseq + 1; + } else { + DRM_DEBUG("Missed target sequence\n"); + return DRM_ERR(EINVAL); + } + } + + spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + + list_for_each(list, &dev_priv->vbl_swaps.head) { + vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); + + if (vbl_swap->drw_id == swap.drawable && + vbl_swap->pipe == pipe && + vbl_swap->sequence == swap.sequence) { + spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + DRM_DEBUG("Already scheduled\n"); + return 0; + } + } + + spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + + if (dev_priv->swaps_pending >= 100) { + DRM_DEBUG("Too many swaps queued\n"); + return DRM_ERR(EBUSY); + } + + vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER); + + if (!vbl_swap) { + DRM_ERROR("Failed to allocate memory to queue swap\n"); + return DRM_ERR(ENOMEM); + } + + DRM_DEBUG("\n"); + + vbl_swap->drw_id = swap.drawable; + vbl_swap->pipe = pipe; + vbl_swap->sequence = swap.sequence; + + spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + + list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head); + dev_priv->swaps_pending++; + + spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + + DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap, + sizeof(swap)); + + return 0; +} + /* drm_dma.h hooks */ void i915_driver_irq_preinstall(drm_device_t * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - I915_WRITE16(I915REG_HWSTAM, 0xfffe); + I915_WRITE16(I915REG_HWSTAM, 0xeffe); I915_WRITE16(I915REG_INT_MASK_R, 0x0); I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); } @@ -269,8 +535,30 @@ void i915_driver_irq_postinstall(drm_device_t * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED; + INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); + dev_priv->swaps_pending = 0; + + if (!dev_priv->vblank_pipe) + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; + 
+ dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED; + INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); + dev_priv->swaps_pending = 0; + + dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED; + dev_priv->user_irq_refcount = 0; + + if (!dev_priv->vblank_pipe) + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; i915_enable_interrupt(dev); DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); + + /* + * Initialize the hardware status page IRQ location. + */ + + I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21)); } void i915_driver_irq_uninstall(drm_device_t * dev) @@ -280,6 +568,7 @@ void i915_driver_irq_uninstall(drm_device_t * dev) if (!dev_priv) return; + dev_priv->irq_enabled = 0; I915_WRITE16(I915REG_HWSTAM, 0xffff); I915_WRITE16(I915REG_INT_MASK_R, 0xffff); I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c index 4c8edeab..3a5fdee8 100644 --- a/shared-core/mach64_dma.c +++ b/shared-core/mach64_dma.c @@ -815,17 +815,18 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init) return DRM_ERR(EINVAL); } + dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset); + if (!dev_priv->ring_map) { + DRM_ERROR("can not find ring map!\n"); + dev->dev_private = (void *)dev_priv; + mach64_do_cleanup_dma(dev); + return DRM_ERR(EINVAL); + } + dev_priv->sarea_priv = (drm_mach64_sarea_t *) ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); if (!dev_priv->is_pci) { - dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset); - if (!dev_priv->ring_map) { - DRM_ERROR("can not find ring map!\n"); - dev->dev_private = (void *)dev_priv; - mach64_do_cleanup_dma(dev); - return DRM_ERR(EINVAL); - } drm_core_ioremap(dev_priv->ring_map, dev); if (!dev_priv->ring_map->handle) { DRM_ERROR("can not ioremap virtual address for" @@ -834,6 +835,7 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init) mach64_do_cleanup_dma(dev); return DRM_ERR(ENOMEM); } + dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { @@ -890,27 +892,9 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init) } } - /* allocate descriptor memory from pci pool */ - DRM_DEBUG("Allocating dma descriptor ring\n"); dev_priv->ring.size = 0x4000; /* 16KB */ - - if (dev_priv->is_pci) { - dev_priv->ring.dmah = drm_pci_alloc(dev, dev_priv->ring.size, - dev_priv->ring.size, - 0xfffffffful); - - if (!dev_priv->ring.dmah) { - DRM_ERROR("Allocating dma descriptor ring failed\n"); - return DRM_ERR(ENOMEM); - } else { - dev_priv->ring.start = dev_priv->ring.dmah->vaddr; - dev_priv->ring.start_addr = - (u32) dev_priv->ring.dmah->busaddr; - } - } else { - dev_priv->ring.start = dev_priv->ring_map->handle; - dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset; - } + dev_priv->ring.start = dev_priv->ring_map->handle; + dev_priv->ring.start_addr = (u32) dev_priv->ring_map->offset; memset(dev_priv->ring.start, 0, dev_priv->ring.size); DRM_INFO("descriptor ring: cpu addr %p, bus addr: 0x%08x\n", @@ -1148,18 +1132,14 @@ int mach64_do_cleanup_dma(drm_device_t * dev) if (dev->dev_private) { drm_mach64_private_t *dev_priv = dev->dev_private; - if (dev_priv->is_pci) { - if (dev_priv->ring.dmah) { - drm_pci_free(dev, dev_priv->ring.dmah); - } - } else { + if (!dev_priv->is_pci) { if (dev_priv->ring_map) drm_core_ioremapfree(dev_priv->ring_map, dev); - } - if (dev->agp_buffer_map) { - drm_core_ioremapfree(dev->agp_buffer_map, dev); - dev->agp_buffer_map = NULL; + if 
(dev->agp_buffer_map) { + drm_core_ioremapfree(dev->agp_buffer_map, dev); + dev->agp_buffer_map = NULL; + } } mach64_destroy_freelist(dev); @@ -1328,17 +1308,88 @@ int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv) return 0; } +static int mach64_do_reclaim_completed(drm_mach64_private_t * dev_priv) +{ + drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; + struct list_head *ptr; + struct list_head *tmp; + drm_mach64_freelist_t *entry; + u32 head, tail, ofs; + + mach64_ring_tick(dev_priv, ring); + head = ring->head; + tail = ring->tail; + + if (head == tail) { +#if MACH64_EXTRA_CHECKING + if (MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE) { + DRM_ERROR("Empty ring with non-idle engine!\n"); + mach64_dump_ring_info(dev_priv); + return -1; + } +#endif + /* last pass is complete, so release everything */ + mach64_do_release_used_buffers(dev_priv); + DRM_DEBUG("%s: idle engine, freed all buffers.\n", + __FUNCTION__); + if (list_empty(&dev_priv->free_list)) { + DRM_ERROR("Freelist empty with idle engine\n"); + return -1; + } + return 0; + } + /* Look for a completed buffer and bail out of the loop + * as soon as we find one -- don't waste time trying + * to free extra bufs here, leave that to do_release_used_buffers + */ + list_for_each_safe(ptr, tmp, &dev_priv->pending) { + entry = list_entry(ptr, drm_mach64_freelist_t, list); + ofs = entry->ring_ofs; + if (entry->discard && + ((head < tail && (ofs < head || ofs >= tail)) || + (head > tail && (ofs < head && ofs >= tail)))) { +#if MACH64_EXTRA_CHECKING + int i; + + for (i = head; i != tail; i = (i + 4) & ring->tail_mask) + { + u32 o1 = le32_to_cpu(((u32 *) ring-> + start)[i + 1]); + u32 o2 = GETBUFADDR(entry->buf); + + if (o1 == o2) { + DRM_ERROR + ("Attempting to free used buffer: " + "i=%d buf=0x%08x\n", + i, o1); + mach64_dump_ring_info(dev_priv); + return -1; + } + } +#endif + /* found a processed buffer */ + entry->buf->pending = 0; + list_del(ptr); + list_add_tail(ptr, &dev_priv->free_list); + DRM_DEBUG + ("%s: freed processed buffer (head=%d tail=%d " + "buf ring ofs=%d).\n", + __FUNCTION__, head, tail, ofs); + return 0; + } + } + + return 1; +} + drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv) { drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; drm_mach64_freelist_t *entry; struct list_head *ptr; - struct list_head *tmp; int t; if (list_empty(&dev_priv->free_list)) { - u32 head, tail, ofs; - if (list_empty(&dev_priv->pending)) { DRM_ERROR ("Couldn't get buffer - pending and free lists empty\n"); @@ -1350,81 +1401,15 @@ drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv) return NULL; } - tail = ring->tail; for (t = 0; t < dev_priv->usec_timeout; t++) { - mach64_ring_tick(dev_priv, ring); - head = ring->head; + int ret; - if (head == tail) { -#if MACH64_EXTRA_CHECKING - if (MACH64_READ(MACH64_GUI_STAT) & - MACH64_GUI_ACTIVE) { - DRM_ERROR - ("Empty ring with non-idle engine!\n"); - mach64_dump_ring_info(dev_priv); - return NULL; - } -#endif - /* last pass is complete, so release everything */ - mach64_do_release_used_buffers(dev_priv); - DRM_DEBUG - ("%s: idle engine, freed all buffers.\n", - __FUNCTION__); - if (list_empty(&dev_priv->free_list)) { - DRM_ERROR - ("Freelist empty with idle engine\n"); - return NULL; - } + ret = mach64_do_reclaim_completed(dev_priv); + if (ret == 0) goto _freelist_entry_found; - } - /* Look for a completed buffer and bail out of the loop - * as soon as we find one -- don't waste time trying - * to free extra bufs here, leave that to 
do_release_used_buffers - */ - list_for_each_safe(ptr, tmp, &dev_priv->pending) { - entry = - list_entry(ptr, drm_mach64_freelist_t, - list); - ofs = entry->ring_ofs; - if (entry->discard && - ((head < tail - && (ofs < head || ofs >= tail)) - || (head > tail - && (ofs < head && ofs >= tail)))) { -#if MACH64_EXTRA_CHECKING - int i; - - for (i = head; i != tail; - i = (i + 4) & ring->tail_mask) { - u32 o1 = - le32_to_cpu(((u32 *) ring-> - start)[i + 1]); - u32 o2 = GETBUFADDR(entry->buf); - - if (o1 == o2) { - DRM_ERROR - ("Attempting to free used buffer: " - "i=%d buf=0x%08x\n", - i, o1); - mach64_dump_ring_info - (dev_priv); - return NULL; - } - } -#endif - /* found a processed buffer */ - entry->buf->pending = 0; - list_del(ptr); - entry->buf->used = 0; - list_add_tail(ptr, - &dev_priv->placeholders); - DRM_DEBUG - ("%s: freed processed buffer (head=%d tail=%d " - "buf ring ofs=%d).\n", - __FUNCTION__, head, tail, ofs); - return entry->buf; - } - } + if (ret < 0) + return NULL; + DRM_UDELAY(1); } mach64_dump_ring_info(dev_priv); @@ -1443,6 +1428,33 @@ drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv) return entry->buf; } +int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf) +{ + struct list_head *ptr; + drm_mach64_freelist_t *entry; + +#if MACH64_EXTRA_CHECKING + list_for_each(ptr, &dev_priv->pending) { + entry = list_entry(ptr, drm_mach64_freelist_t, list); + if (copy_buf == entry->buf) { + DRM_ERROR("%s: Trying to release a pending buf\n", + __FUNCTION__); + return DRM_ERR(EFAULT); + } + } +#endif + ptr = dev_priv->placeholders.next; + entry = list_entry(ptr, drm_mach64_freelist_t, list); + copy_buf->pending = 0; + copy_buf->used = 0; + entry->buf = copy_buf; + entry->discard = 1; + list_del(ptr); + list_add_tail(ptr, &dev_priv->free_list); + + return 0; +} + /*@}*/ diff --git a/shared-core/mach64_drm.h b/shared-core/mach64_drm.h index d92ece2c..083f959d 100644 --- a/shared-core/mach64_drm.h +++ b/shared-core/mach64_drm.h @@ -68,7 +68,7 @@ /* Max number of swaps allowed on the ring * before the client must wait */ -#define MACH64_MAX_QUEUED_FRAMES 3 +#define MACH64_MAX_QUEUED_FRAMES 3U /* Byte offsets for host blit buffer data */ @@ -237,7 +237,7 @@ typedef struct drm_mach64_vertex { } drm_mach64_vertex_t; typedef struct drm_mach64_blit { - int idx; + void *buf; int pitch; int offset; int format; diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h index 2a5f2ff9..bb8b309e 100644 --- a/shared-core/mach64_drv.h +++ b/shared-core/mach64_drv.h @@ -42,9 +42,9 @@ #define DRIVER_NAME "mach64" #define DRIVER_DESC "DRM module for the ATI Rage Pro" -#define DRIVER_DATE "20020904" +#define DRIVER_DATE "20060718" -#define DRIVER_MAJOR 1 +#define DRIVER_MAJOR 2 #define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 @@ -61,7 +61,6 @@ typedef struct drm_mach64_freelist { } drm_mach64_freelist_t; typedef struct drm_mach64_descriptor_ring { - drm_dma_handle_t *dmah; /* Handle to pci dma memory */ void *start; /* write pointer (cpu address) to start of descriptor ring */ u32 start_addr; /* bus address of beginning of descriptor ring */ int size; /* size of ring in bytes */ @@ -123,6 +122,8 @@ extern void mach64_driver_lastclose(drm_device_t * dev); extern int mach64_init_freelist(drm_device_t * dev); extern void mach64_destroy_freelist(drm_device_t * dev); extern drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv); +extern int mach64_freelist_put(drm_mach64_private_t * dev_priv, + drm_buf_t * copy_buf); extern int 
mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries); diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c index ce44f0d5..38cefca9 100644 --- a/shared-core/mach64_state.c +++ b/shared-core/mach64_state.c @@ -480,16 +480,16 @@ static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv) /* Copy and verify a client submited buffer. * FIXME: Make an assembly optimized version */ -static __inline__ int copy_and_verify_from_user(u32 *to, - const u32 __user *ufrom, - unsigned long bytes) +static __inline__ int copy_from_user_vertex(u32 *to, + const u32 __user *ufrom, + unsigned long bytes) { unsigned long n = bytes; /* dwords remaining in buffer */ u32 *from, *orig_from; from = drm_alloc(bytes, DRM_MEM_DRIVER); if (from == NULL) - return ENOMEM; + return DRM_ERR(ENOMEM); if (DRM_COPY_FROM_USER(from, ufrom, bytes)) { drm_free(from, bytes, DRM_MEM_DRIVER); @@ -546,12 +546,15 @@ static __inline__ int copy_and_verify_from_user(u32 *to, } static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev, - int prim, void *buf, unsigned long used, - int discard) + drm_mach64_vertex_t * vertex) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_buf_t *copy_buf; + void *buf = vertex->buf; + unsigned long used = vertex->used; + int ret = 0; + int i = 0; int done = 0; int verify_ret = 0; DMALOCALS; @@ -559,100 +562,92 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev, DRM_DEBUG("%s: buf=%p used=%lu nbox=%d\n", __FUNCTION__, buf, used, sarea_priv->nbox); - if (used) { - int ret = 0; - int i = 0; + if (!used) + goto _vertex_done; - copy_buf = mach64_freelist_get(dev_priv); - if (copy_buf == NULL) { - DRM_ERROR("%s: couldn't get buffer in DMAGETPTR\n", - __FUNCTION__); - return DRM_ERR(EAGAIN); - } + copy_buf = mach64_freelist_get(dev_priv); + if (copy_buf == NULL) { + DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__); + return DRM_ERR(EAGAIN); + } + + verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used); - if ((verify_ret = - copy_and_verify_from_user(GETBUFPTR(copy_buf), buf, - used)) == 0) { + if (verify_ret != 0) { + mach64_freelist_put(dev_priv, copy_buf); + goto _vertex_done; + } - copy_buf->used = used; + copy_buf->used = used; - DMASETPTR(copy_buf); + DMASETPTR(copy_buf); - if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) { - ret = mach64_emit_state(filp, dev_priv); - if (ret < 0) - return ret; - } + if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) { + ret = mach64_emit_state(filp, dev_priv); + if (ret < 0) + return ret; + } - do { - /* Emit the next cliprect */ - if (i < sarea_priv->nbox) { - ret = - mach64_emit_cliprect(filp, dev_priv, - &sarea_priv-> - boxes[i]); - if (ret < 0) { - /* failed to get buffer */ - return ret; - } else if (ret != 0) { - /* null intersection with scissor */ - continue; - } - } - if ((i >= sarea_priv->nbox - 1)) - done = 1; - - /* Add the buffer to the DMA queue */ - DMAADVANCE(dev_priv, done); - - } while (++i < sarea_priv->nbox); + do { + /* Emit the next cliprect */ + if (i < sarea_priv->nbox) { + ret = mach64_emit_cliprect(filp, dev_priv, + &sarea_priv->boxes[i]); + if (ret < 0) { + /* failed to get buffer */ + return ret; + } else if (ret != 0) { + /* null intersection with scissor */ + continue; + } } + if ((i >= sarea_priv->nbox - 1)) + done = 1; + + /* Add the buffer to the DMA queue */ + DMAADVANCE(dev_priv, done); + + } while (++i < sarea_priv->nbox); - if (copy_buf->pending && !done) { + if (!done) { + if 
(copy_buf->pending) { DMADISCARDBUF(); - } else if (!done) { - /* This buffer wasn't used (no cliprects or verify failed), so place it back - * on the free list + } else { + /* This buffer wasn't used (no cliprects), so place it + * back on the free list */ - struct list_head *ptr; - drm_mach64_freelist_t *entry; -#if MACH64_EXTRA_CHECKING - list_for_each(ptr, &dev_priv->pending) { - entry = - list_entry(ptr, drm_mach64_freelist_t, - list); - if (copy_buf == entry->buf) { - DRM_ERROR - ("%s: Trying to release a pending buf\n", - __FUNCTION__); - return DRM_ERR(EFAULT); - } - } -#endif - ptr = dev_priv->placeholders.next; - entry = list_entry(ptr, drm_mach64_freelist_t, list); - copy_buf->pending = 0; - copy_buf->used = 0; - entry->buf = copy_buf; - entry->discard = 1; - list_del(ptr); - list_add_tail(ptr, &dev_priv->free_list); + mach64_freelist_put(dev_priv, copy_buf); } } +_vertex_done: sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS; sarea_priv->nbox = 0; return verify_ret; } +static __inline__ int copy_from_user_blit(u32 *to, + const u32 __user *ufrom, + unsigned long bytes) +{ + to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET); + + if (DRM_COPY_FROM_USER(to, ufrom, bytes)) { + return DRM_ERR(EFAULT); + } + + return 0; +} + static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev, drm_mach64_blit_t * blit) { drm_mach64_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; int dword_shift, dwords; - drm_buf_t *buf; + unsigned long used; + drm_buf_t *copy_buf; + int verify_ret = 0; DMALOCALS; /* The compiler won't optimize away a division by a variable, @@ -679,34 +674,34 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev, return DRM_ERR(EINVAL); } - /* Dispatch the blit buffer. - */ - buf = dma->buflist[blit->idx]; - - if (buf->filp != filp) { - DRM_ERROR("process %d (filp %p) using buffer with filp %p\n", - DRM_CURRENTPID, filp, buf->filp); - return DRM_ERR(EINVAL); - } - - if (buf->pending) { - DRM_ERROR("sending pending buffer %d\n", blit->idx); - return DRM_ERR(EINVAL); - } - /* Set buf->used to the bytes of blit data based on the blit dimensions * and verify the size. When the setup is emitted to the buffer with * the DMA* macros below, buf->used is incremented to include the bytes * used for setup as well as the blit data. */ dwords = (blit->width * blit->height) >> dword_shift; - buf->used = dwords << 2; - if (buf->used <= 0 || - buf->used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) { - DRM_ERROR("Invalid blit size: %d bytes\n", buf->used); + used = dwords << 2; + if (used <= 0 || + used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) { + DRM_ERROR("Invalid blit size: %lu bytes\n", used); return DRM_ERR(EINVAL); } + copy_buf = mach64_freelist_get(dev_priv); + if (copy_buf == NULL) { + DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__); + return DRM_ERR(EAGAIN); + } + + verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used); + + if (verify_ret != 0) { + mach64_freelist_put(dev_priv, copy_buf); + goto _blit_done; + } + + copy_buf->used = used; + /* FIXME: Use a last buffer flag and reduce the state emitted for subsequent, * continuation buffers? */ @@ -715,7 +710,7 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev, * a register command every 16 dwords. 
State setup is added at the start of the * buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET */ - DMASETPTR(buf); + DMASETPTR(copy_buf); DMAOUTREG(MACH64_Z_CNTL, 0); DMAOUTREG(MACH64_SCALE_3D_CNTL, 0); @@ -745,12 +740,13 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev, DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x); DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width); - DRM_DEBUG("%s: %d bytes\n", __FUNCTION__, buf->used); + DRM_DEBUG("%s: %lu bytes\n", __FUNCTION__, used); /* Add the buffer to the queue */ DMAADVANCEHOSTDATA(dev_priv); - return 0; +_blit_done: + return verify_ret; } /* ================================================================ @@ -842,14 +838,12 @@ int mach64_dma_vertex(DRM_IOCTL_ARGS) if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS) sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS; - return mach64_dma_dispatch_vertex(filp, dev, vertex.prim, vertex.buf, - vertex.used, vertex.discard); + return mach64_dma_dispatch_vertex(filp, dev, &vertex); } int mach64_dma_blit(DRM_IOCTL_ARGS) { DRM_DEVICE; - drm_device_dma_t *dma = dev->dma; drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mach64_blit_t blit; @@ -860,15 +854,6 @@ int mach64_dma_blit(DRM_IOCTL_ARGS) DRM_COPY_FROM_USER_IOCTL(blit, (drm_mach64_blit_t *) data, sizeof(blit)); - DRM_DEBUG("%s: pid=%d index=%d\n", - __FUNCTION__, DRM_CURRENTPID, blit.idx); - - if (blit.idx < 0 || blit.idx >= dma->buf_count) { - DRM_ERROR("buffer index %d (of %d max)\n", - blit.idx, dma->buf_count - 1); - return DRM_ERR(EINVAL); - } - ret = mach64_dma_dispatch_blit(filp, dev, &blit); /* Make sure we restore the 3D state next time. diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index dc866823..c65ffd59 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -538,6 +538,36 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, return 0; } +static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv, + drm_radeon_kcmd_buffer_t *cmdbuf) +{ + u32 *cmd = (u32 *) cmdbuf->buf; + int count, ret; + RING_LOCALS; + + count=(cmd[0]>>16) & 0x3fff; + + if ((cmd[1] & 0x8000ffff) != 0x80000810) { + DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); + return DRM_ERR(EINVAL); + } + ret = r300_check_offset(dev_priv, cmd[2]); + if (ret) { + DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); + return DRM_ERR(EINVAL); + } + + BEGIN_RING(count+2); + OUT_RING(cmd[0]); + OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); + ADVANCE_RING(); + + cmdbuf->buf += (count+2)*4; + cmdbuf->bufsz -= (count+2)*4; + + return 0; +} + static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { @@ -578,10 +608,11 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, case RADEON_CNTL_BITBLT_MULTI: return r300_emit_bitblt_multi(dev_priv, cmdbuf); + case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */ + return r300_emit_indx_buffer(dev_priv, cmdbuf); case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */ case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */ case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */ - case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */ case RADEON_WAIT_FOR_IDLE: case RADEON_CP_NOP: 
/* these packets are safe */ diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index 4e7b2823..4135f4d5 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -1130,7 +1130,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, | (dev_priv->fb_location >> 16)); #if __OS_HAS_AGP - if (dev_priv->flags & CHIP_IS_AGP) { + if (dev_priv->flags & RADEON_IS_AGP) { RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); RADEON_WRITE(RADEON_MC_AGP_LOCATION, (((dev_priv->gart_vm_start - 1 + @@ -1158,7 +1158,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev, dev_priv->ring.tail = cur_read_ptr; #if __OS_HAS_AGP - if (dev_priv->flags & CHIP_IS_AGP) { + if (dev_priv->flags & RADEON_IS_AGP) { RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset - dev->agp->base + dev_priv->gart_vm_start); @@ -1301,7 +1301,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) { u32 tmp; - if (dev_priv->flags & CHIP_IS_PCIE) { + if (dev_priv->flags & RADEON_IS_PCIE) { radeon_set_pciegart(dev_priv, on); return; } @@ -1339,26 +1339,26 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) DRM_DEBUG("\n"); /* if we require new memory map but we don't have it fail */ - if ((dev_priv->flags & CHIP_NEW_MEMMAP) && !dev_priv->new_memmap) + if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); radeon_do_cleanup_cp(dev); return DRM_ERR(EINVAL); } - if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP)) + if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { DRM_DEBUG("Forcing AGP card to PCI mode\n"); - dev_priv->flags &= ~CHIP_IS_AGP; + dev_priv->flags &= ~RADEON_IS_AGP; } - else if (!(dev_priv->flags & (CHIP_IS_AGP | CHIP_IS_PCI | CHIP_IS_PCIE)) + else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) && !init->is_pci) { DRM_DEBUG("Restoring AGP flag\n"); - dev_priv->flags |= CHIP_IS_AGP; + dev_priv->flags |= RADEON_IS_AGP; } - if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) { + if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { DRM_ERROR("PCI GART memory not allocated!\n"); radeon_do_cleanup_cp(dev); return DRM_ERR(EINVAL); @@ -1501,7 +1501,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) init->sarea_priv_offset); #if __OS_HAS_AGP - if (dev_priv->flags & CHIP_IS_AGP) { + if (dev_priv->flags & RADEON_IS_AGP) { drm_core_ioremap(dev_priv->cp_ring, dev); drm_core_ioremap(dev_priv->ring_rptr, dev); drm_core_ioremap(dev->agp_buffer_map, dev); @@ -1560,7 +1560,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) * align it down. 
*/ #if __OS_HAS_AGP - if (dev_priv->flags & CHIP_IS_AGP) { + if (dev_priv->flags & RADEON_IS_AGP) { base = dev->agp->base; /* Check if valid */ if ((base + dev_priv->gart_size) > dev_priv->fb_location && @@ -1590,7 +1590,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) } #if __OS_HAS_AGP - if (dev_priv->flags & CHIP_IS_AGP) + if (dev_priv->flags & RADEON_IS_AGP) dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset - dev->agp->base + dev_priv->gart_vm_start); @@ -1616,7 +1616,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; #if __OS_HAS_AGP - if (dev_priv->flags & CHIP_IS_AGP) { + if (dev_priv->flags & RADEON_IS_AGP) { /* Turn off PCI GART */ radeon_set_pcigart(dev_priv, 0); } else @@ -1636,7 +1636,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) dev_priv->gart_info.mapping.handle; dev_priv->gart_info.is_pcie = - !!(dev_priv->flags & CHIP_IS_PCIE); + !!(dev_priv->flags & RADEON_IS_PCIE); dev_priv->gart_info.gart_table_location = DRM_ATI_GART_FB; @@ -1648,7 +1648,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init) DRM_ATI_GART_MAIN; dev_priv->gart_info.addr = NULL; dev_priv->gart_info.bus_addr = 0; - if (dev_priv->flags & CHIP_IS_PCIE) { + if (dev_priv->flags & RADEON_IS_PCIE) { DRM_ERROR ("Cannot use PCI Express without GART in FB memory\n"); radeon_do_cleanup_cp(dev); @@ -1690,7 +1690,7 @@ static int radeon_do_cleanup_cp(drm_device_t * dev) drm_irq_uninstall(dev); #if __OS_HAS_AGP - if (dev_priv->flags & CHIP_IS_AGP) { + if (dev_priv->flags & RADEON_IS_AGP) { if (dev_priv->cp_ring != NULL) { drm_core_ioremapfree(dev_priv->cp_ring, dev); dev_priv->cp_ring = NULL; @@ -1745,7 +1745,7 @@ static int radeon_do_resume_cp(drm_device_t * dev) DRM_DEBUG("Starting radeon_do_resume_cp()\n"); #if __OS_HAS_AGP - if (dev_priv->flags & CHIP_IS_AGP) { + if (dev_priv->flags & RADEON_IS_AGP) { /* Turn off PCI GART */ radeon_set_pcigart(dev_priv, 0); } else @@ -2194,7 +2194,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) dev->dev_private = (void *)dev_priv; dev_priv->flags = flags; - switch (flags & CHIP_FAMILY_MASK) { + switch (flags & RADEON_FAMILY_MASK) { case CHIP_R100: case CHIP_RV200: case CHIP_R200: @@ -2202,7 +2202,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) case CHIP_R350: case CHIP_R420: case CHIP_RV410: - dev_priv->flags |= CHIP_HAS_HIERZ; + dev_priv->flags |= RADEON_HAS_HIERZ; break; default: /* all other chips have no hierarchical z buffer */ @@ -2210,14 +2210,14 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) } if (drm_device_is_agp(dev)) - dev_priv->flags |= CHIP_IS_AGP; + dev_priv->flags |= RADEON_IS_AGP; else if (drm_device_is_pcie(dev)) - dev_priv->flags |= CHIP_IS_PCIE; + dev_priv->flags |= RADEON_IS_PCIE; else - dev_priv->flags |= CHIP_IS_PCI; + dev_priv->flags |= RADEON_IS_PCI; DRM_DEBUG("%s card detected\n", - ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI")))); + ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? 
"PCIE" : "PCI")))); return ret; } diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index ff75480a..6ea2a175 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -134,16 +134,16 @@ enum radeon_cp_microcode_version { * Chip flags */ enum radeon_chip_flags { - CHIP_FAMILY_MASK = 0x0000ffffUL, - CHIP_FLAGS_MASK = 0xffff0000UL, - CHIP_IS_MOBILITY = 0x00010000UL, - CHIP_IS_IGP = 0x00020000UL, - CHIP_SINGLE_CRTC = 0x00040000UL, - CHIP_IS_AGP = 0x00080000UL, - CHIP_HAS_HIERZ = 0x00100000UL, - CHIP_IS_PCIE = 0x00200000UL, - CHIP_NEW_MEMMAP = 0x00400000UL, - CHIP_IS_PCI = 0x00800000UL, + RADEON_FAMILY_MASK = 0x0000ffffUL, + RADEON_FLAGS_MASK = 0xffff0000UL, + RADEON_IS_MOBILITY = 0x00010000UL, + RADEON_IS_IGP = 0x00020000UL, + RADEON_SINGLE_CRTC = 0x00040000UL, + RADEON_IS_AGP = 0x00080000UL, + RADEON_HAS_HIERZ = 0x00100000UL, + RADEON_IS_PCIE = 0x00200000UL, + RADEON_NEW_MEMMAP = 0x00400000UL, + RADEON_IS_PCI = 0x00800000UL, }; #define GET_RING_HEAD(dev_priv) (dev_priv->writeback_works ? \ @@ -423,6 +423,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp, #define RADEON_RB3D_COLOROFFSET 0x1c40 #define RADEON_RB3D_COLORPITCH 0x1c48 +#define RADEON_SRC_X_Y 0x1590 + #define RADEON_DP_GUI_MASTER_CNTL 0x146c # define RADEON_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0) # define RADEON_GMC_DST_PITCH_OFFSET_CNTL (1 << 1) @@ -440,6 +442,7 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp, # define RADEON_ROP3_S 0x00cc0000 # define RADEON_ROP3_P 0x00f00000 #define RADEON_DP_WRITE_MASK 0x16cc +#define RADEON_SRC_PITCH_OFFSET 0x1428 #define RADEON_DST_PITCH_OFFSET 0x142c #define RADEON_DST_PITCH_OFFSET_C 0x1c80 # define RADEON_DST_TILE_LINEAR (0 << 30) @@ -1095,7 +1098,7 @@ do { \ n, __FUNCTION__ ); \ } \ if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ - COMMIT_RING(); \ + COMMIT_RING(); \ radeon_wait_ring( dev_priv, (n) * sizeof(u32) ); \ } \ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index 58251dd7..bf5e3d29 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -42,7 +42,11 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * drm_file_t * filp_priv, u32 * offset) { - u32 off = *offset; + u64 off = *offset; + u32 fb_start = dev_priv->fb_location; + u32 fb_end = fb_start + dev_priv->fb_size - 1; + u32 gart_start = dev_priv->gart_vm_start; + u32 gart_end = gart_start + dev_priv->gart_size - 1; struct drm_radeon_driver_file_fields *radeon_priv; /* Hrm ... the story of the offset ... So this function converts @@ -62,10 +66,8 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * /* First, the best case, the offset already lands in either the * framebuffer or the GART mapped space */ - if ((off >= dev_priv->fb_location && - off < (dev_priv->fb_location + dev_priv->fb_size)) || - (off >= dev_priv->gart_vm_start && - off < (dev_priv->gart_vm_start + dev_priv->gart_size))) + if ((off >= fb_start && off <= fb_end) || + (off >= gart_start && off <= gart_end)) return 0; /* Ok, that didn't happen... 
now check if we have a zero based @@ -78,16 +80,13 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * } /* Finally, assume we aimed at a GART offset if beyond the fb */ - if (off > (dev_priv->fb_location + dev_priv->fb_size)) - off = off - (dev_priv->fb_location + dev_priv->fb_size) + - dev_priv->gart_vm_start; + if (off > fb_end) + off = off - fb_end - 1 + gart_start; /* Now recheck and fail if out of bounds */ - if ((off >= dev_priv->fb_location && - off < (dev_priv->fb_location + dev_priv->fb_size)) || - (off >= dev_priv->gart_vm_start && - off < (dev_priv->gart_vm_start + dev_priv->gart_size))) { - DRM_DEBUG("offset fixed up to 0x%x\n", off); + if ((off >= fb_start && off <= fb_end) || + (off >= gart_start && off <= gart_end)) { + DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off); *offset = off; return 0; } @@ -175,6 +174,14 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * } break; + case R200_EMIT_VAP_CTL: { + RING_LOCALS; + BEGIN_RING(2); + OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); + ADVANCE_RING(); + } + break; + case RADEON_EMIT_RB3D_COLORPITCH: case RADEON_EMIT_RE_LINE_PATTERN: case RADEON_EMIT_SE_LINE_WIDTH: @@ -202,7 +209,6 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * case R200_EMIT_TCL_LIGHT_MODEL_CTL_0: case R200_EMIT_TFACTOR_0: case R200_EMIT_VTX_FMT_0: - case R200_EMIT_VAP_CTL: case R200_EMIT_MATRIX_SELECT_0: case R200_EMIT_TEX_PROC_CTL_2: case R200_EMIT_TCL_UCP_VERT_BLEND_CTL: @@ -269,6 +275,8 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * unsigned int *cmdsz) { u32 *cmd = (u32 *) cmdbuf->buf; + u32 offset, narrays; + int count, i, k; *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16); @@ -282,10 +290,106 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * return DRM_ERR(EINVAL); } - /* Check client state and fix it up if necessary */ - if (cmd[0] & 0x8000) { /* MSB of opcode: next DWORD GUI_CNTL */ - u32 offset; + switch(cmd[0] & 0xff00) { + /* XXX Are there old drivers needing other packets? */ + + case RADEON_3D_DRAW_IMMD: + case RADEON_3D_DRAW_VBUF: + case RADEON_3D_DRAW_INDX: + case RADEON_WAIT_FOR_IDLE: + case RADEON_CP_NOP: + case RADEON_3D_CLEAR_ZMASK: +/* case RADEON_CP_NEXT_CHAR: + case RADEON_CP_PLY_NEXTSCAN: + case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? 
*/ + /* these packets are safe */ + break; + + case RADEON_CP_3D_DRAW_IMMD_2: + case RADEON_CP_3D_DRAW_VBUF_2: + case RADEON_CP_3D_DRAW_INDX_2: + case RADEON_3D_CLEAR_HIZ: + /* safe but r200 only */ + if (dev_priv->microcode_version != UCODE_R200) { + DRM_ERROR("Invalid 3d packet for r100-class chip\n"); + return DRM_ERR(EINVAL); + } + break; + + case RADEON_3D_LOAD_VBPNTR: + count = (cmd[0] >> 16) & 0x3fff; + + if (count > 18) { /* 12 arrays max */ + DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", + count); + return DRM_ERR(EINVAL); + } + + /* carefully check packet contents */ + narrays = cmd[1] & ~0xc000; + k = 0; + i = 2; + while ((k < narrays) && (i < (count + 2))) { + i++; /* skip attribute field */ + if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) { + DRM_ERROR + ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", + k, i); + return DRM_ERR(EINVAL); + } + k++; + i++; + if (k == narrays) + break; + /* have one more to process, they come in pairs */ + if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) { + DRM_ERROR + ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", + k, i); + return DRM_ERR(EINVAL); + } + k++; + i++; + } + /* do the counts match what we expect ? */ + if ((k != narrays) || (i != (count + 2))) { + DRM_ERROR + ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", + k, i, narrays, count + 1); + return DRM_ERR(EINVAL); + } + break; + + case RADEON_3D_RNDR_GEN_INDX_PRIM: + if (dev_priv->microcode_version != UCODE_R100) { + DRM_ERROR("Invalid 3d packet for r200-class chip\n"); + return DRM_ERR(EINVAL); + } + if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) { + DRM_ERROR("Invalid rndr_gen_indx offset\n"); + return DRM_ERR(EINVAL); + } + break; + case RADEON_CP_INDX_BUFFER: + if (dev_priv->microcode_version != UCODE_R200) { + DRM_ERROR("Invalid 3d packet for r100-class chip\n"); + return DRM_ERR(EINVAL); + } + if ((cmd[1] & 0x8000ffff) != 0x80000810) { + DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); + return DRM_ERR(EINVAL); + } + if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) { + DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); + return DRM_ERR(EINVAL); + } + break; + + case RADEON_CNTL_HOSTDATA_BLT: + case RADEON_CNTL_PAINT_MULTI: + case RADEON_CNTL_BITBLT_MULTI: + /* MSB of opcode: next DWORD GUI_CNTL */ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { offset = cmd[2] << 10; @@ -307,6 +411,11 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * } cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; } + break; + + default: + DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); + return DRM_ERR(EINVAL); } return 0; @@ -862,7 +971,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev, */ dev_priv->sarea_priv->ctx_owner = 0; - if ((dev_priv->flags & CHIP_HAS_HIERZ) + if ((dev_priv->flags & RADEON_HAS_HIERZ) && (flags & RADEON_USE_HIERZ)) { /* FIXME : reverse engineer that for Rx00 cards */ /* FIXME : the mask supposedly contains low-res z values. So can't set @@ -907,7 +1016,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev, for (i = 0; i < nbox; i++) { int tileoffset, nrtilesx, nrtilesy, j; /* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */ - if ((dev_priv->flags & CHIP_HAS_HIERZ) + if ((dev_priv->flags & RADEON_HAS_HIERZ) && !(dev_priv->microcode_version == UCODE_R200)) { /* FIXME : figure this out for r200 (when hierz is enabled). 
Or maybe r200 actually doesn't need to put the low-res z value into @@ -991,7 +1100,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev, } /* TODO don't always clear all hi-level z tiles */ - if ((dev_priv->flags & CHIP_HAS_HIERZ) + if ((dev_priv->flags & RADEON_HAS_HIERZ) && (dev_priv->microcode_version == UCODE_R200) && (flags & RADEON_USE_HIERZ)) /* r100 and cards without hierarchical z-buffer have no high-level z-buffer */ @@ -1263,9 +1372,9 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev) DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h); - BEGIN_RING(7); + BEGIN_RING(9); - OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5)); + OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0)); OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL | RADEON_GMC_BRUSH_NONE | @@ -1277,6 +1386,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev) /* Make this work even if front & back are flipped: */ + OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1)); if (dev_priv->current_page == 0) { OUT_RING(dev_priv->back_pitch_offset); OUT_RING(dev_priv->front_pitch_offset); @@ -1285,6 +1395,7 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev) OUT_RING(dev_priv->back_pitch_offset); } + OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2)); OUT_RING((x << 16) | y); OUT_RING((x << 16) | y); OUT_RING((w << 16) | h); @@ -2653,10 +2764,10 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8); RING_LOCALS; - if (!sz) - return 0; - if (sz * 4 > cmdbuf->bufsz) - return DRM_ERR(EINVAL); + if (!sz) + return 0; + if (sz * 4 > cmdbuf->bufsz) + return DRM_ERR(EINVAL); BEGIN_RING(5 + sz); OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0); @@ -3032,9 +3143,9 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS) break; case RADEON_PARAM_CARD_TYPE: - if (dev_priv->flags & CHIP_IS_PCIE) + if (dev_priv->flags & RADEON_IS_PCIE) value = RADEON_CARD_PCIE; - else if (dev_priv->flags & CHIP_IS_AGP) + else if (dev_priv->flags & RADEON_IS_AGP) value = RADEON_CARD_AGP; else value = RADEON_CARD_PCI; diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 20fea40f..01121b92 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -725,6 +725,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init) dev_priv->status = NULL; } if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { + dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h index b739a483..d1cdc19c 100644 --- a/shared-core/sis_drv.h +++ b/shared-core/sis_drv.h @@ -40,8 +40,8 @@ #define DRIVER_PATCHLEVEL 1 enum sis_family { - SIS_OTHER = 0, - SIS_CHIP_315 = 1, + SIS_OTHER = 0, + SIS_CHIP_315 = 1, }; #if defined(__linux__) @@ -52,18 +52,18 @@ enum sis_family { #include "drm_sman.h" #define SIS_BASE (dev_priv->mmio) -#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg); +#define SIS_READ(reg) DRM_READ32(SIS_BASE, reg); #define SIS_WRITE(reg, val) DRM_WRITE32(SIS_BASE, reg, val); typedef struct drm_sis_private { - drm_local_map_t *mmio; - unsigned idle_fault; - drm_sman_t sman; - unsigned long chipset; - int vram_initialized; - int agp_initialized; - unsigned long vram_offset; - unsigned long agp_offset; + drm_local_map_t *mmio; + unsigned idle_fault; + drm_sman_t sman; + unsigned long chipset; + int vram_initialized; + int agp_initialized; + unsigned long 
vram_offset; + unsigned long agp_offset; } drm_sis_private_t; extern int sis_idle(drm_device_t *dev); diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c index 11fa7388..c6e7aa77 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -44,14 +44,14 @@ #define CMDBUF_ALIGNMENT_MASK (0x0ff) /* defines for VIA 3D registers */ -#define VIA_REG_STATUS 0x400 -#define VIA_REG_TRANSET 0x43C +#define VIA_REG_STATUS 0x400 +#define VIA_REG_TRANSET 0x43C #define VIA_REG_TRANSPACE 0x440 /* VIA_REG_STATUS(0x400): Engine Status */ #define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */ -#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */ -#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */ +#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */ +#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */ #define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */ #define SetReg2DAGP(nReg, nData) { \ @@ -120,7 +120,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size) next_addr = cur_addr + size + 512*1024; count = 1000000; do { - hw_addr = *hw_addr_ptr - agp_base; + hw_addr = *hw_addr_ptr - agp_base; if (count-- == 0) { DRM_ERROR ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n", @@ -247,10 +247,10 @@ static int via_dma_init(DRM_IOCTL_ARGS) else retcode = via_dma_cleanup(dev); break; - case VIA_DMA_INITIALIZED: + case VIA_DMA_INITIALIZED: retcode = (dev_priv->ring.virtual_start != NULL) ? 0: DRM_ERR( EFAULT ); - break; + break; default: retcode = DRM_ERR(EINVAL); break; @@ -406,7 +406,7 @@ static int via_pci_cmdbuffer(DRM_IOCTL_ARGS) static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv, uint32_t * vb, int qw_count) { - for (; qw_count > 0; --qw_count) { + for (; qw_count > 0; --qw_count) { VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY); } return vb; @@ -471,7 +471,7 @@ static int via_hook_segment(drm_via_private_t *dev_priv, } if (paused && !no_pci_fire) { - uint32_t rgtr,ptr; + uint32_t rgtr,ptr; uint32_t ptr_low; count = 1000000; diff --git a/shared-core/via_drm.h b/shared-core/via_drm.h index 049c63e1..ee92ff69 100644 --- a/shared-core/via_drm.h +++ b/shared-core/via_drm.h @@ -47,12 +47,12 @@ #define VIA_DRM_DRIVER_MAJOR 2 #define VIA_DRM_DRIVER_MINOR 10 #define VIA_DRM_DRIVER_PATCHLEVEL 2 -#define VIA_DRM_DRIVER_VERSION (((VIA_DRM_DRIVER_MAJOR) << 16) | (VIA_DRM_DRIVER_MINOR)) +#define VIA_DRM_DRIVER_VERSION (((VIA_DRM_DRIVER_MAJOR) << 16) | (VIA_DRM_DRIVER_MINOR)) #define VIA_NR_SAREA_CLIPRECTS 8 -#define VIA_NR_XVMC_PORTS 10 -#define VIA_NR_XVMC_LOCKS 5 -#define VIA_MAX_CACHELINE_SIZE 64 +#define VIA_NR_XVMC_PORTS 10 +#define VIA_NR_XVMC_LOCKS 5 +#define VIA_MAX_CACHELINE_SIZE 64 #define XVMCLOCKPTR(saPriv,lockNo) \ ((volatile drm_hw_lock_t *)(((((unsigned long) (saPriv)->XvMCLockArea) + \ (VIA_MAX_CACHELINE_SIZE - 1)) & \ @@ -67,29 +67,29 @@ #define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */ #define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */ -#define VIA_UPLOAD_CTX 0x4 +#define VIA_UPLOAD_CTX 0x4 #define VIA_UPLOAD_BUFFERS 0x8 #define VIA_UPLOAD_TEX0 0x10 #define VIA_UPLOAD_TEX1 0x20 #define VIA_UPLOAD_CLIPRECTS 0x40 -#define VIA_UPLOAD_ALL 0xff +#define VIA_UPLOAD_ALL 0xff /* VIA specific ioctls */ #define DRM_VIA_ALLOCMEM 0x00 -#define DRM_VIA_FREEMEM 0x01 +#define DRM_VIA_FREEMEM 0x01 #define DRM_VIA_AGP_INIT 0x02 -#define DRM_VIA_FB_INIT 0x03 +#define DRM_VIA_FB_INIT 0x03 #define DRM_VIA_MAP_INIT 0x04 #define DRM_VIA_DEC_FUTEX 0x05 #define NOT_USED #define DRM_VIA_DMA_INIT 0x07 #define 
DRM_VIA_CMDBUFFER 0x08 -#define DRM_VIA_FLUSH 0x09 -#define DRM_VIA_PCICMD 0x0a +#define DRM_VIA_FLUSH 0x09 +#define DRM_VIA_PCICMD 0x0a #define DRM_VIA_CMDBUF_SIZE 0x0b #define NOT_USED -#define DRM_VIA_WAIT_IRQ 0x0d -#define DRM_VIA_DMA_BLIT 0x0e +#define DRM_VIA_WAIT_IRQ 0x0d +#define DRM_VIA_DMA_BLIT 0x0e #define DRM_VIA_BLIT_SYNC 0x0f #define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t) @@ -172,7 +172,7 @@ typedef struct _drm_via_dma_init { enum { VIA_INIT_DMA = 0x01, VIA_CLEANUP_DMA = 0x02, - VIA_DMA_INITIALIZED = 0x03 + VIA_DMA_INITIALIZED = 0x03 } func; unsigned long offset; @@ -217,7 +217,7 @@ typedef struct _drm_via_sarea { /* Used by the 3d driver only at this point, for pageflipping: */ - unsigned int pfCurrentOffset; + unsigned int pfCurrentOffset; } drm_via_sarea_t; typedef struct _drm_via_cmdbuf_size { @@ -273,17 +273,17 @@ typedef struct drm_via_blitsync { */ typedef struct drm_via_dmablit { - uint32_t num_lines; - uint32_t line_length; + uint32_t num_lines; + uint32_t line_length; - uint32_t fb_addr; - uint32_t fb_stride; + uint32_t fb_addr; + uint32_t fb_stride; - unsigned char *mem_addr; - uint32_t mem_stride; + unsigned char *mem_addr; + uint32_t mem_stride; uint32_t flags; - int to_fb; + int to_fb; drm_via_blitsync_t sync; } drm_via_dmablit_t; diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c index bacfe37d..33b0a42d 100644 --- a/shared-core/via_drv.c +++ b/shared-core/via_drv.c @@ -22,7 +22,6 @@ * DEALINGS IN THE SOFTWARE. */ -#include <linux/config.h> #include "drmP.h" #include "via_drm.h" #include "via_drv.h" diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h index 86cd049d..18d2a331 100644 --- a/shared-core/via_drv.h +++ b/shared-core/via_drv.h @@ -76,9 +76,9 @@ typedef struct drm_via_private { volatile uint32_t *last_pause_ptr; volatile uint32_t *hw_addr_ptr; drm_via_ring_buffer_t ring; - struct timeval last_vblank; - int last_vblank_valid; - unsigned usec_per_vblank; + struct timeval last_vblank; + int last_vblank_valid; + unsigned usec_per_vblank; drm_via_state_t hc_state; char pci_buf[VIA_PCI_BUF_SIZE]; const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; @@ -90,14 +90,14 @@ typedef struct drm_via_private { uint32_t irq_enable_mask; uint32_t irq_pending_mask; int *irq_map; - /* Memory manager stuff */ + /* Memory manager stuff */ #ifdef VIA_HAVE_CORE_MM - unsigned idle_fault; + unsigned int idle_fault; drm_sman_t sman; int vram_initialized; int agp_initialized; - unsigned long vram_offset; - unsigned long agp_offset; + unsigned long vram_offset; + unsigned long agp_offset; #endif #ifdef VIA_HAVE_DMABLIT drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES]; @@ -148,8 +148,6 @@ extern int via_driver_dma_quiescent(drm_device_t * dev); extern void via_init_futex(drm_via_private_t *dev_priv); extern void via_cleanup_futex(drm_via_private_t *dev_priv); extern void via_release_futex(drm_via_private_t *dev_priv, int context); -extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq, - int force_sequence, unsigned int *sequence); #ifdef VIA_HAVE_CORE_MM extern void via_reclaim_buffers_locked(drm_device_t *dev, struct file *filp); diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c index aaba0cb6..db60b28e 100644 --- a/shared-core/via_irq.c +++ b/shared-core/via_irq.c @@ -43,7 +43,7 @@ #define VIA_REG_INTERRUPT 0x200 /* VIA_REG_INTERRUPT */ -#define VIA_IRQ_GLOBAL (1 << 31) +#define VIA_IRQ_GLOBAL (1 << 31) #define VIA_IRQ_VBLANK_ENABLE (1 << 19) #define VIA_IRQ_VBLANK_PENDING (1 << 3) #define 
VIA_IRQ_HQV0_ENABLE (1 << 11) @@ -93,8 +93,8 @@ static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; static unsigned time_diff(struct timeval *now,struct timeval *then) { return (now->tv_usec >= then->tv_usec) ? - now->tv_usec - then->tv_usec : - 1000000 - (then->tv_usec - now->tv_usec); + now->tv_usec - then->tv_usec : + 1000000 - (then->tv_usec - now->tv_usec); } irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) @@ -110,21 +110,21 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) status = VIA_READ(VIA_REG_INTERRUPT); if (status & VIA_IRQ_VBLANK_PENDING) { atomic_inc(&dev->vbl_received); - if (!(atomic_read(&dev->vbl_received) & 0x0F)) { + if (!(atomic_read(&dev->vbl_received) & 0x0F)) { #ifdef __linux__ do_gettimeofday(&cur_vblank); #else microtime(&cur_vblank); #endif - if (dev_priv->last_vblank_valid) { + if (dev_priv->last_vblank_valid) { dev_priv->usec_per_vblank = time_diff(&cur_vblank, &dev_priv->last_vblank) >> 4; } dev_priv->last_vblank = cur_vblank; dev_priv->last_vblank_valid = 1; - } - if (!(atomic_read(&dev->vbl_received) & 0xFF)) { + } + if (!(atomic_read(&dev->vbl_received) & 0xFF)) { DRM_DEBUG("US per vblank is: %u\n", dev_priv->usec_per_vblank); } @@ -198,13 +198,13 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence) return ret; } -int +static int via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, unsigned int *sequence) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned int cur_irq_sequence; - drm_via_irq_t *cur_irq = dev_priv->via_irqs; + drm_via_irq_t *cur_irq; int ret = 0; maskarray_t *masks; int real_irq; @@ -231,7 +231,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence, } masks = dev_priv->irq_masks; - cur_irq += real_irq; + cur_irq = dev_priv->via_irqs + real_irq; if (masks[real_irq][2] && !force_sequence) { DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, @@ -257,11 +257,12 @@ void via_driver_irq_preinstall(drm_device_t * dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; - drm_via_irq_t *cur_irq = dev_priv->via_irqs; + drm_via_irq_t *cur_irq; int i; DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv); if (dev_priv) { + cur_irq = dev_priv->via_irqs; dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING; @@ -285,7 +286,7 @@ void via_driver_irq_preinstall(drm_device_t * dev) DRM_DEBUG("Initializing IRQ %d\n", i); } - dev_priv->last_vblank_valid = 0; + dev_priv->last_vblank_valid = 0; /* Clear VSync interrupt regs */ status = VIA_READ(VIA_REG_INTERRUPT); diff --git a/shared-core/via_verifier.h b/shared-core/via_verifier.h index c0ffc97f..96708a39 100644 --- a/shared-core/via_verifier.h +++ b/shared-core/via_verifier.h @@ -26,20 +26,20 @@ #ifndef _VIA_VERIFIER_H_ #define _VIA_VERIFIER_H_ -typedef enum{ +typedef enum { no_sequence = 0, z_address, dest_address, tex_address -}drm_via_sequence_t; +} drm_via_sequence_t; -typedef struct{ +typedef struct { unsigned texture; uint32_t z_addr; uint32_t d_addr; - uint32_t t_addr[2][10]; + uint32_t t_addr[2][10]; uint32_t pitch[2][10]; uint32_t height[2][10]; uint32_t tex_level_lo[2]; |
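A few standalone notes on the logic this diff introduces follow; the C snippets are illustrative sketches, not part of the patch, and any names, constants or layouts not present in the diff are assumptions. First, the missed-target check in i915_vblank_swap() compares wrapping counters with unsigned subtraction: a requested sequence counts as already passed when it lies no more than half the usable range (1 << 23 here) behind the current count. A minimal userspace rendition, with a helper name of our own choosing:

#include <stdio.h>

/* Mirrors the (curseq - swap.sequence) <= (1 << 23) test: for a counter
 * that wraps, 'target' counts as missed when it sits at or behind 'cur'
 * by no more than half the usable range. */
static int vblank_target_missed(unsigned int cur, unsigned int target)
{
    return (cur - target) <= (1u << 23);
}

int main(void)
{
    printf("%d\n", vblank_target_missed(100u, 101u));       /* 0: still ahead */
    printf("%d\n", vblank_target_missed(100u, 90u));        /* 1: already passed */
    printf("%d\n", vblank_target_missed(5u, 0xfffffff0u));  /* 1: passed across the wrap */
    return 0;
}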
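The buffer-reclaim test factored out into mach64_do_reclaim_completed() decides whether a buffer whose descriptors start at ring offset ofs has been consumed by the engine, that is, whether ofs lies outside the region still pending between head (engine read pointer) and tail (CPU write pointer), allowing for wrap-around. A self-contained version of that predicate (function and parameter names are ours):

#include <stdio.h>

/* Returns 1 when a descriptor at 'ofs' is no longer inside the pending
 * region between 'head' and 'tail' of a circular ring, matching the
 * discard test in mach64_do_reclaim_completed(). */
static int ring_entry_completed(unsigned int head, unsigned int tail,
                                unsigned int ofs)
{
    if (head == tail)              /* ring drained: everything is done */
        return 1;
    if (head < tail)               /* pending region is [head, tail) */
        return ofs < head || ofs >= tail;
    /* pending region wraps around the end of the ring */
    return ofs < head && ofs >= tail;
}

int main(void)
{
    printf("%d\n", ring_entry_completed(8, 32, 4));   /* 1: head already moved past it */
    printf("%d\n", ring_entry_completed(8, 32, 16));  /* 0: still queued */
    printf("%d\n", ring_entry_completed(48, 8, 16));  /* 1: wrapped case */
    return 0;
}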
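The renaming of copy_and_verify_from_user() to copy_from_user_vertex() and the new mach64_freelist_put() helper belong to a larger change visible throughout mach64_state.c: client submissions are copied into kernel-owned DMA buffers obtained from the freelist (and checked there) instead of being dispatched from buffers the client maps directly, which is consistent with DRIVER_MAJOR moving from 1 to 2. A much-simplified userspace sketch of the copy-then-verify idea; the flat register/value layout and the whitelist predicate are placeholders, not the real mach64 command format:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder policy: accept only "registers" below an arbitrary bound. */
static int register_allowed(uint32_t reg) { return reg < 0x800; }

/* Copy the submission into private memory first, then validate it, and
 * only hand the validated copy onward. Rejecting means the whole buffer
 * is dropped. */
static int copy_and_verify(uint32_t *to, const uint32_t *from, size_t dwords)
{
    size_t i;
    uint32_t *tmp = malloc(dwords * sizeof(*tmp));

    if (!tmp)
        return -1;
    memcpy(tmp, from, dwords * sizeof(*tmp));   /* stands in for DRM_COPY_FROM_USER */

    for (i = 0; i < dwords; i += 2) {           /* treat data as reg/value pairs */
        if (!register_allowed(tmp[i])) {
            free(tmp);
            return -1;
        }
    }
    memcpy(to, tmp, dwords * sizeof(*tmp));
    free(tmp);
    return 0;
}

int main(void)
{
    uint32_t src[4] = { 0x100, 1, 0x200, 2 }, dst[4];

    printf("%d\n", copy_and_verify(dst, src, 4));   /* 0: accepted */
    return 0;
}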
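mach64_dma_dispatch_blit() now derives the copy size from the blit dimensions rather than trusting a client-owned buffer: dwords = (width * height) >> dword_shift, used = dwords << 2, rejected if it does not fit after the host-data setup area. A sketch of that arithmetic; the two size constants below are placeholders, not the driver's real values:

#include <stdio.h>

#define BUFFER_SIZE            0x4000   /* placeholder for MACH64_BUFFER_SIZE */
#define HOSTDATA_BLIT_OFFSET   0x140    /* placeholder for MACH64_HOSTDATA_BLIT_OFFSET */

/* dword_shift encodes pixels per dword: 4 pixels per dword at 8 bpp
 * (shift 2), 2 at 16 bpp (shift 1), 1 at 32 bpp (shift 0). Returns the
 * byte count, or -1 if the blit would not fit in one buffer. */
static long blit_bytes(int width, int height, int dword_shift)
{
    long dwords = ((long)width * height) >> dword_shift;
    long used = dwords << 2;

    if (used <= 0 || used > BUFFER_SIZE - HOSTDATA_BLIT_OFFSET)
        return -1;
    return used;
}

int main(void)
{
    printf("%ld\n", blit_bytes(64, 16, 2));  /* 8 bpp: 1024 bytes */
    printf("%ld\n", blit_bytes(64, 16, 0));  /* 32 bpp: 4096 bytes */
    return 0;
}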
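Both r300_emit_indx_buffer() and the reworked radeon_check_and_fixup_packet3() read the same type-3 packet header fields: the opcode is matched through cmd[0] & 0xff00 and the length comes from bits 16..29, with the whole packet spanning count + 2 dwords (the header plus count + 1 payload dwords, which is exactly what OUT_RING_TABLE emits). A tiny decoder showing just that; the sample header value is made up:

#include <stdint.h>
#include <stdio.h>

/* Pull the opcode byte and length field out of a Radeon CP type-3
 * packet header, as the checks in the diff do. */
static void decode_packet3(uint32_t header)
{
    unsigned int opcode = (header >> 8) & 0xff;      /* compared via cmd[0] & 0xff00 */
    unsigned int count  = (header >> 16) & 0x3fff;   /* length field */

    printf("opcode 0x%02x00, payload %u dwords, %u dwords total\n",
           opcode, count + 1, count + 2);
}

int main(void)
{
    decode_packet3(0xc0021000u);   /* hypothetical header: opcode byte 0x10, count 2 */
    return 0;
}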
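The new RADEON_3D_LOAD_VBPNTR case walks the packet as one dword giving the number of arrays followed by descriptors in pairs (a shared packing dword plus one offset per array), and cross-checks that walk against the header's length field; the count > 18 limit corresponds to the 12-array maximum mentioned in the code. A helper reproducing the expected length (the helper is ours, not the driver's):

#include <stdio.h>

/* Number of array-descriptor dwords that must follow the narrays dword,
 * and therefore the value the header's count field has to carry: full
 * pairs take 3 dwords, a trailing odd array takes 2. */
static int vbpntr_payload_dwords(int narrays)
{
    return 3 * (narrays / 2) + 2 * (narrays % 2);
}

int main(void)
{
    int n;

    for (n = 1; n <= 12; n++)
        printf("narrays=%2d -> expected count=%2d\n", n, vbpntr_payload_dwords(n));
    return 0;   /* narrays=12 gives 18, the upper bound in the check */
}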
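The CHIP_* to RADEON_* rename keeps the flag layout declared in radeon_drv.h: the chip family occupies the low 16 bits and independent feature bits the high 16, so one flags word feeds both the family switch in radeon_driver_load() and the feature tests elsewhere. A compact illustration using the values from the diff (the family code below is hypothetical):

#include <stdio.h>

enum {
    RADEON_FAMILY_MASK = 0x0000ffff,
    RADEON_IS_AGP      = 0x00080000,
    RADEON_HAS_HIERZ   = 0x00100000,
    RADEON_IS_PCIE     = 0x00200000,
};

int main(void)
{
    unsigned int flags = 0x0006            /* hypothetical family code */
                       | RADEON_IS_PCIE | RADEON_HAS_HIERZ;

    printf("family 0x%04x\n", flags & RADEON_FAMILY_MASK);
    printf("agp=%d pcie=%d hierz=%d\n",
           !!(flags & RADEON_IS_AGP),
           !!(flags & RADEON_IS_PCIE),
           !!(flags & RADEON_HAS_HIERZ));
    return 0;
}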
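radeon_check_and_fixup_offset() now works on a 64-bit offset and uses inclusive range ends (start + size - 1). One practical effect, plausibly part of the motivation, is that an aperture reaching the very top of the 32-bit address space no longer wraps the end-of-range computation to zero. A standalone illustration with hypothetical aperture values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t fb_start = 0xe0000000u;
    uint32_t fb_size  = 0x20000000u;        /* 512 MB ending at the top of 4 GB */
    uint32_t off      = 0xf0000000u;

    uint32_t bad_end  = fb_start + fb_size;      /* wraps to 0 in 32 bits */
    uint32_t good_end = fb_start + fb_size - 1;  /* 0xffffffff */

    printf("exclusive-end check: %d\n",
           off >= fb_start && off < bad_end);    /* 0: wrongly rejected */
    printf("inclusive-end check: %d\n",
           off >= fb_start && off <= good_end);  /* 1: accepted */
    return 0;
}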
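radeon_cp_dispatch_swap() now emits the back-to-front copy as three type-0 register writes instead of one BITBLT_MULTI type-3 packet. A type-0 header addresses a starting register and is followed by one dword per register written, which is how the three groups of 1, 2 and 3 registers add up to the 9 dwords reserved by BEGIN_RING(9). A small accounting sketch; the header encoding follows the usual Radeon CP_PACKET0 form, where the second argument is the register count minus one:

#include <stdio.h>

#define CP_PACKET0(reg, n)  ((unsigned int)(((n) << 16) | ((reg) >> 2)))  /* writes n+1 regs */

/* One header dword plus one data dword per register written. */
static int packet0_dwords(int nregs)
{
    return 1 + nregs;
}

int main(void)
{
    printf("SRC/DST_PITCH_OFFSET pair header: 0x%08x\n", CP_PACKET0(0x1428, 1));
    printf("swap blit total: %d dwords\n",
           packet0_dwords(1) + packet0_dwords(2) + packet0_dwords(3));  /* 9 */
    return 0;
}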
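Finally, in via_irq.c, via_driver_irq_preinstall() used to compute cur_irq from dev_priv->via_irqs in its declaration, before the if (dev_priv) guard; the patch defers that lookup until after the check, and via_driver_irq_wait() gets the same deferral while also becoming static. A cut-down illustration of the ordering; the structures are stand-ins, not the driver's:

#include <stddef.h>
#include <stdio.h>

struct irq_slot { int enabled; };
struct priv { struct irq_slot irqs[4]; };

/* cur_irq is declared without an initializer and the member lookup
 * happens only inside the guarded block, matching the patched code. */
static void preinstall(struct priv *dev_priv)
{
    struct irq_slot *cur_irq;

    if (!dev_priv)
        return;

    cur_irq = dev_priv->irqs;       /* evaluated only after the guard */
    cur_irq[0].enabled = 1;
}

int main(void)
{
    static struct priv p;           /* zero-initialized */

    preinstall(NULL);               /* safe: bails out before any access */
    preinstall(&p);
    printf("%d\n", p.irqs[0].enabled);
    return 0;
}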