author     Keith Packard <keithp@neko.keithp.com>    2007-01-07 22:37:40 -0800
committer  Keith Packard <keithp@neko.keithp.com>    2007-01-07 22:37:40 -0800
commit     c5aaf7648df82665851c9e67f5509b427ca34c8e (patch)
tree       55b317738a99097cb02ba6a736b9ef20de6b34f3 /shared-core
parent     63c0f3946056d044b7c5688fa5cb670782212c77 (diff)
parent     d0080d71b9f3df0d4f743324b7e8f1ce580bdcaf (diff)
Merge branch 'master' into crestline
Conflicts:
	shared-core/i915_drm.h

Whitespace change only
Diffstat (limited to 'shared-core')
32 files changed, 4421 insertions, 473 deletions
diff --git a/shared-core/Makefile.am b/shared-core/Makefile.am index cd278643..f0ebf2a3 100644 --- a/shared-core/Makefile.am +++ b/shared-core/Makefile.am @@ -29,6 +29,7 @@ klibdrminclude_HEADERS = \ i915_drm.h \ mach64_drm.h \ mga_drm.h \ + nouveau_drm.h \ r128_drm.h \ radeon_drm.h \ savage_drm.h \ diff --git a/shared-core/drm_drawable.c b/shared-core/drm_drawable.c deleted file mode 100644 index 0817e321..00000000 --- a/shared-core/drm_drawable.c +++ /dev/null @@ -1,330 +0,0 @@ -/** - * \file drm_drawable.c - * IOCTLs for drawables - * - * \author Rickard E. (Rik) Faith <faith@valinux.com> - * \author Gareth Hughes <gareth@valinux.com> - * \author Michel Dänzer <michel@tungstengraphics.com> - */ - -/* - * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com - * - * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. - * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -#include "drmP.h" - -/** - * Allocate drawable ID and memory to store information about it. 
- */ -int drm_adddraw(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - unsigned long irqflags; - int i, j; - u32 *bitfield = dev->drw_bitfield; - unsigned int bitfield_length = dev->drw_bitfield_length; - drm_drawable_info_t **info = dev->drw_info; - unsigned int info_length = dev->drw_info_length; - drm_draw_t draw; - - for (i = 0, j = 0; i < bitfield_length; i++) { - if (bitfield[i] == ~0) - continue; - - for (; j < 8 * sizeof(*bitfield); j++) - if (!(bitfield[i] & (1 << j))) - goto done; - } -done: - - if (i == bitfield_length) { - bitfield_length++; - - bitfield = drm_alloc(bitfield_length * sizeof(*bitfield), - DRM_MEM_BUFS); - - if (!bitfield) { - DRM_ERROR("Failed to allocate new drawable bitfield\n"); - return DRM_ERR(ENOMEM); - } - - if (8 * sizeof(*bitfield) * bitfield_length > info_length) { - info_length += 8 * sizeof(*bitfield); - - info = drm_alloc(info_length * sizeof(*info), - DRM_MEM_BUFS); - - if (!info) { - DRM_ERROR("Failed to allocate new drawable info" - " array\n"); - - drm_free(bitfield, - bitfield_length * sizeof(*bitfield), - DRM_MEM_BUFS); - return DRM_ERR(ENOMEM); - } - } - - bitfield[i] = 0; - } - - draw.handle = i * 8 * sizeof(*bitfield) + j + 1; - DRM_DEBUG("%d\n", draw.handle); - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - bitfield[i] |= 1 << j; - info[draw.handle - 1] = NULL; - - if (bitfield != dev->drw_bitfield) { - memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length * - sizeof(*bitfield)); - drm_free(dev->drw_bitfield, sizeof(*bitfield) * - dev->drw_bitfield_length, DRM_MEM_BUFS); - dev->drw_bitfield = bitfield; - dev->drw_bitfield_length = bitfield_length; - } - - if (info != dev->drw_info) { - memcpy(info, dev->drw_info, dev->drw_info_length * - sizeof(*info)); - drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length, - DRM_MEM_BUFS); - dev->drw_info = info; - dev->drw_info_length = info_length; - } - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw)); - - return 0; -} - -/** - * Free drawable ID and memory to store information about it. - */ -int drm_rmdraw(DRM_IOCTL_ARGS) -{ - DRM_DEVICE; - drm_draw_t draw; - int id, idx; - unsigned int shift; - unsigned long irqflags; - u32 *bitfield = dev->drw_bitfield; - unsigned int bitfield_length = dev->drw_bitfield_length; - drm_drawable_info_t **info = dev->drw_info; - unsigned int info_length = dev->drw_info_length; - - DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, - sizeof(draw)); - - id = draw.handle - 1; - idx = id / (8 * sizeof(*bitfield)); - shift = id % (8 * sizeof(*bitfield)); - - if (idx < 0 || idx >= bitfield_length || - !(bitfield[idx] & (1 << shift))) { - DRM_DEBUG("No such drawable %d\n", draw.handle); - return 0; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - bitfield[idx] &= ~(1 << shift); - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - if (info[id]) { - drm_free(info[id]->rects, info[id]->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - drm_free(info[id], sizeof(**info), DRM_MEM_BUFS); - } - - /* Can we shrink the arrays? 
*/ - if (idx == bitfield_length - 1) { - while (idx >= 0 && !bitfield[idx]) - --idx; - - bitfield_length = idx + 1; - - if (idx != id / (8 * sizeof(*bitfield))) - bitfield = drm_alloc(bitfield_length * - sizeof(*bitfield), DRM_MEM_BUFS); - - if (!bitfield && bitfield_length) { - bitfield = dev->drw_bitfield; - bitfield_length = dev->drw_bitfield_length; - } - } - - if (bitfield != dev->drw_bitfield) { - info_length = 8 * sizeof(*bitfield) * bitfield_length; - - info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS); - - if (!info && info_length) { - info = dev->drw_info; - info_length = dev->drw_info_length; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - memcpy(bitfield, dev->drw_bitfield, bitfield_length * - sizeof(*bitfield)); - drm_free(dev->drw_bitfield, sizeof(*bitfield) * - dev->drw_bitfield_length, DRM_MEM_BUFS); - dev->drw_bitfield = bitfield; - dev->drw_bitfield_length = bitfield_length; - - if (info != dev->drw_info) { - memcpy(info, dev->drw_info, info_length * - sizeof(*info)); - drm_free(dev->drw_info, sizeof(*info) * - dev->drw_info_length, DRM_MEM_BUFS); - dev->drw_info = info; - dev->drw_info_length = info_length; - } - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - } - - DRM_DEBUG("%d\n", draw.handle); - return 0; -} - -int drm_update_drawable_info(DRM_IOCTL_ARGS) { - DRM_DEVICE; - drm_update_draw_t update; - unsigned int id, idx, shift, bitfield_length = dev->drw_bitfield_length; - u32 *bitfield = dev->drw_bitfield; - unsigned long irqflags; - drm_drawable_info_t *info; - drm_clip_rect_t *rects; - int err; - - DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, - sizeof(update)); - - id = update.handle - 1; - idx = id / (8 * sizeof(*bitfield)); - shift = id % (8 * sizeof(*bitfield)); - - if (idx < 0 || idx >= bitfield_length || - !(bitfield[idx] & (1 << shift))) { - DRM_ERROR("No such drawable %d\n", update.handle); - return DRM_ERR(EINVAL); - } - - info = dev->drw_info[id]; - - if (!info) { - info = drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS); - - if (!info) { - DRM_ERROR("Failed to allocate drawable info memory\n"); - return DRM_ERR(ENOMEM); - } - } - - switch (update.type) { - case DRM_DRAWABLE_CLIPRECTS: - if (update.num != info->num_rects) { - rects = drm_alloc(update.num * sizeof(drm_clip_rect_t), - DRM_MEM_BUFS); - } else - rects = info->rects; - - if (update.num && !rects) { - DRM_ERROR("Failed to allocate cliprect memory\n"); - err = DRM_ERR(ENOMEM); - goto error; - } - - if (update.num && DRM_COPY_FROM_USER(rects, - (drm_clip_rect_t __user *) - (unsigned long)update.data, - update.num * - sizeof(*rects))) { - DRM_ERROR("Failed to copy cliprects from userspace\n"); - err = DRM_ERR(EFAULT); - goto error; - } - - spin_lock_irqsave(&dev->drw_lock, irqflags); - - if (rects != info->rects) { - drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - } - - info->rects = rects; - info->num_rects = update.num; - dev->drw_info[id] = info; - - spin_unlock_irqrestore(&dev->drw_lock, irqflags); - - DRM_DEBUG("Updated %d cliprects for drawable %d\n", - info->num_rects, id); - break; - default: - DRM_ERROR("Invalid update type %d\n", update.type); - return DRM_ERR(EINVAL); - } - - return 0; - -error: - if (!dev->drw_info[id]) - drm_free(info, sizeof(*info), DRM_MEM_BUFS); - else if (rects != dev->drw_info[id]->rects) - drm_free(rects, update.num * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - - return err; -} - -/** - * Caller must hold the drawable spinlock! 
- */ -drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { - u32 *bitfield = dev->drw_bitfield; - unsigned int idx, shift; - - id--; - idx = id / (8 * sizeof(*bitfield)); - shift = id % (8 * sizeof(*bitfield)); - - if (idx < 0 || idx >= dev->drw_bitfield_length || - !(bitfield[idx] & (1 << shift))) { - DRM_DEBUG("No such drawable %d\n", id); - return NULL; - } - - return dev->drw_info[id]; -} -EXPORT_SYMBOL(drm_get_drawable_info); diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 7c185a30..dc921d9d 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -219,7 +219,9 @@ 0x1106 0x3122 0 "VIA CLE266" 0x1106 0x7205 0 "VIA KM400" 0x1106 0x3108 0 "VIA K8M800" -0x1106 0x3344 0 "VIA P4VM800PRO" +0x1106 0x3344 0 "VIA CN700 / VM800 / P4M800Pro" +0x1106 0x3343 0 "VIA P4M890" +0x1106 0x3230 VIA_DX9_0 "VIA K8M890" [i810] 0x8086 0x7121 0 "Intel i810 GMCH" @@ -463,3 +465,233 @@ 0x10DE 0x009C NV40 "NVidia 0x009C" 0x10DE 0x009D NV40 "NVidia Quadro FX 4500" 0x10DE 0x009E NV40 "NVidia 0x009E" + +[nouveau] +0x10de 0x0008 NV_03 "EDGE 3D" +0x10de 0x0009 NV_03 "EDGE 3D" +0x10de 0x0010 NV_03 "Mutara V08" +0x10de 0x0020 NV_04 "RIVA TNT" +0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro" +0x10de 0x0029 NV_04 "RIVA TNT2 Ultra" +0x10de 0x002a NV_04 "Riva TnT2" +0x10de 0x002b NV_04 "Riva TnT2" +0x10de 0x002c NV_04 "Vanta/Vanta LT" +0x10de 0x002d NV_04 "RIVA TNT2 Model 64/Model 64 Pro" +0x10de 0x002e NV_04 "Vanta" +0x10de 0x002f NV_04 "Vanta" +0x10de 0x0040 NV_40 "GeForce 6800 Ultra" +0x10de 0x0041 NV_40 "GeForce 6800" +0x10de 0x0042 NV_40 "GeForce 6800 LE" +0x10de 0x0043 NV_40 "NV40.3" +0x10de 0x0044 NV_40 "GeForce 6800 XT" +0x10de 0x0045 NV_40 "GeForce 6800 GT" +0x10de 0x0046 NV_40 "GeForce 6800 GT" +0x10de 0x0047 NV_40 "GeForce 6800 GS" +0x10de 0x0048 NV_40 "GeForce 6800 XT" +0x10de 0x0049 NV_40 "NV40GL" +0x10de 0x004d NV_40 "Quadro FX 4000" +0x10de 0x004e NV_40 "Quadro FX 4000" +0x10de 0x0090 NV_40 "GeForce 7800 GTX" +0x10de 0x0091 NV_40 "GeForce 7800 GTX" +0x10de 0x0092 NV_40 "GeForce 7800 GT" +0x10de 0x0093 NV_40 "GeForce 7800 GS" +0x10de 0x0098 NV_40 "GeForce Go 7800" +0x10de 0x0099 NV_40 "GE Force Go 7800 GTX" +0x10de 0x009d NV_40 "Quadro FX4500" +0x10de 0x00a0 NV_04 "Aladdin TNT2" +0x10de 0x00c0 NV_40 "GeForce 6800 GS" +0x10de 0x00c1 NV_40 "GeForce 6800" +0x10de 0x00c2 NV_40 "GeForce 6800 LE" +0x10de 0x00c3 NV_40 "Geforce 6800 XT" +0x10de 0x00c8 NV_40 "GeForce Go 6800" +0x10de 0x00c9 NV_40 "GeForce Go 6800 Ultra" +0x10de 0x00cc NV_40 "Quadro FX Go1400" +0x10de 0x00cd NV_40 "Quadro FX 3450/4000 SDI" +0x10de 0x00ce NV_40 "Quadro FX 1400" +0x10de 0x00f0 NV_40 "GeForce 6800/GeForce 6800 Ultra" +0x10de 0x00f1 NV_40 "GeForce 6600/GeForce 6600 GT" +0x10de 0x00f2 NV_40 "GeForce 6600/GeForce 6600 GT" +0x10de 0x00f3 NV_40 "GeForce 6200" +0x10de 0x00f4 NV_40 "GeForce 6600 LE" +0x10de 0x00f5 NV_40 "GeForce 7800 GS" +0x10de 0x00f6 NV_40 "GeForce 6600 GS" +0x10de 0x00f8 NV_40 "Quadro FX 3400/4400" +0x10de 0x00f9 NV_40 "GeForce 6800 Ultra/GeForce 6800 GT" +0x10de 0x00fa NV_30 "GeForce PCX 5750" +0x10de 0x00fb NV_30 "GeForce PCX 5900" +0x10de 0x00fc NV_30 "Quadro FX 330/GeForce PCX 5300" +0x10de 0x00fd NV_30 "Quadro FX 330/Quadro NVS280" +0x10de 0x00fe NV_30 "Quadro FX 1300" +0x10de 0x00ff NV_17 "GeForce PCX 4300" +0x10de 0x0100 NV_10 "GeForce 256 SDR" +0x10de 0x0101 NV_10 "GeForce 256 DDR" +0x10de 0x0103 NV_10 "Quadro" +0x10de 0x0110 NV_11 "GeForce2 MX/MX 400" +0x10de 0x0111 NV_11 "GeForce2 MX 100 DDR/200 DDR" +0x10de 0x0112 NV_11 "GeForce2 Go" +0x10de 0x0113 
NV_11 "Quadro2 MXR/EX/Go" +0x10de 0x0140 NV_40 "GeForce 6600 GT" +0x10de 0x0141 NV_40 "GeForce 6600" +0x10de 0x0142 NV_40 "GeForce 6600 PCIe" +0x10de 0x0144 NV_40 "GeForce Go 6600" +0x10de 0x0145 NV_40 "GeForce 6610 XL" +0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE" +0x10de 0x0148 NV_40 "GeForce Go 6600" +0x10de 0x0149 NV_40 "GeForce Go 6600 GT" +0x10de 0x014a NV_40 "Quadro NVS 440" +0x10de 0x014d NV_17 "Quadro FX 550" +0x10de 0x014e NV_40 "Quadro FX 540" +0x10de 0x014f NV_40 "GeForce 6200" +0x10de 0x0150 NV_15 "GeForce2 GTS/Pro" +0x10de 0x0151 NV_15 "GeForce2 Ti" +0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner" +0x10de 0x0153 NV_15 "Quadro2 Pro" +0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)" +0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)" +0x10de 0x0163 NV_44 "GeForce 6200 LE" +0x10de 0x0164 NV_44 "GeForce Go 6200" +0x10de 0x0165 NV_44 "Quadro NVS 285" +0x10de 0x0166 NV_44 "GeForce Go 6400" +0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache" +0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache" +0x10de 0x0170 NV_17 "GeForce4 MX 460" +0x10de 0x0171 NV_17 "GeForce4 MX 440" +0x10de 0x0172 NV_17 "GeForce4 MX 420" +0x10de 0x0173 NV_17 "GeForce4 MX 440-SE" +0x10de 0x0174 NV_17 "GeForce4 440 Go" +0x10de 0x0175 NV_17 "GeForce4 420 Go" +0x10de 0x0176 NV_17 "GeForce4 420 Go 32M" +0x10de 0x0177 NV_17 "GeForce4 460 Go" +0x10de 0x0178 NV_17 "Quadro4 550 XGL" +0x10de 0x0179 NV_17 "GeForce4 420 Go 32M" +0x10de 0x017a NV_17 "Quadro4 200/400 NVS" +0x10de 0x017b NV_17 "Quadro4 550 XGL" +0x10de 0x017c NV_17 "Quadro4 500 GoGL" +0x10de 0x017d NV_17 "GeForce4 410 Go 16M" +0x10de 0x0181 NV_17 "GeForce4 MX 440 AGP 8x" +0x10de 0x0182 NV_17 "GeForce4 MX 440SE AGP 8x" +0x10de 0x0183 NV_17 "GeForce4 MX 420 AGP 8x" +0x10de 0x0185 NV_17 "GeForce4 MX 4000 AGP 8x" +0x10de 0x0186 NV_17 "GeForce4 448 Go" +0x10de 0x0187 NV_17 "GeForce4 488 Go" +0x10de 0x0188 NV_17 "Quadro4 580 XGL" +0x10de 0x018a NV_17 "Quadro4 NVS AGP 8x" +0x10de 0x018b NV_17 "Quadro4 380 XGL" +0x10de 0x018c NV_17 "Quadro NVS 50 PCI" +0x10de 0x018d NV_17 "GeForce4 448 Go" +0x10de 0x0191 NV_50 "GeForce 8800 GTX" +0x10de 0x0193 NV_50 "GeForce 8800 GTS" +0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics" +0x10de 0x01d1 NV_44 "GeForce 7300 LE" +0x10de 0x01d6 NV_44 "GeForce Go 7200" +0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300" +0x10de 0x01d8 NV_44 "GeForce Go 7400" +0x10de 0x01da NV_44 "Quadro NVS 110M" +0x10de 0x01df NV_44 "GeForce 7300 GS" +0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU" +0x10de 0x0200 NV_20 "GeForce3" +0x10de 0x0201 NV_20 "GeForce3 Ti 200" +0x10de 0x0202 NV_20 "GeForce3 Ti 500" +0x10de 0x0203 NV_20 "Quadro DCC" +0x10de 0x0211 NV_40 "GeForce 6800" +0x10de 0x0212 NV_40 "GeForce 6800 LE" +0x10de 0x0215 NV_40 "GeForce 6800 GT" +0x10de 0x0218 NV_40 "GeForce 6800 XT" +0x10de 0x0221 NV_44 "GeForce 6200" +0x10de 0x0240 NV_44 "GeForce 6150" +0x10de 0x0242 NV_44 "GeForce 6100" +0x10de 0x0250 NV_25 "GeForce4 Ti 4600" +0x10de 0x0251 NV_25 "GeForce4 Ti 4400" +0x10de 0x0252 NV_25 "GeForce4 Ti" +0x10de 0x0253 NV_25 "GeForce4 Ti 4200" +0x10de 0x0258 NV_25 "Quadro4 900 XGL" +0x10de 0x0259 NV_25 "Quadro4 750 XGL" +0x10de 0x025b NV_25 "Quadro4 700 XGL" +0x10de 0x0280 NV_25 "GeForce4 Ti 4800" +0x10de 0x0281 NV_25 "GeForce4 Ti 4200 AGP 8x" +0x10de 0x0282 NV_25 "GeForce4 Ti 4800 SE" +0x10de 0x0286 NV_25 "GeForce4 Ti 4200 Go AGP 8x" +0x10de 0x0288 NV_25 "Quadro4 980 XGL" +0x10de 0x0289 NV_25 "Quadro4 780 XGL" +0x10de 0x028c NV_25 "Quadro4 700 GoGL" +0x10de 0x0290 NV_40 "GeForce 7900 GTX" +0x10de 0x0291 NV_40 "GeForce 
7900 GT" +0x10de 0x0292 NV_40 "GeForce 7900 GS" +0x10de 0x0298 NV_40 "GeForce Go 7900 GS" +0x10de 0x0299 NV_40 "GeForce Go 7900 GTX" +0x10de 0x029a NV_40 "Quadro FX 2500M" +0x10de 0x029b NV_40 "Quadro FX 1500M" +0x10de 0x029c NV_40 "Quadro FX 5500" +0x10de 0x029d NV_40 "Quadro FX 3500" +0x10de 0x029e NV_40 "Quadro FX 1500" +0x10de 0x029f NV_40 "Quadro FX 4500 X2" +0x10de 0x02a0 NV_20 "XGPU" +0x10de 0x02e1 NV_40 "GeForce 7600 GS" +0x10de 0x0300 NV_30 "GeForce FX" +0x10de 0x0301 NV_30 "GeForce FX 5800 Ultra" +0x10de 0x0302 NV_30 "GeForce FX 5800" +0x10de 0x0308 NV_30 "Quadro FX 2000" +0x10de 0x0309 NV_30 "Quadro FX 1000" +0x10de 0x0311 NV_30 "GeForce FX 5600 Ultra" +0x10de 0x0312 NV_30 "GeForce FX 5600" +0x10de 0x0313 NV_30 "NV31" +0x10de 0x0314 NV_30 "GeForce FX 5600XT" +0x10de 0x0316 NV_30 "NV31M" +0x10de 0x0317 NV_30 "NV31M Pro" +0x10de 0x031a NV_30 "GeForce FX Go5600" +0x10de 0x031b NV_30 "GeForce FX Go5650" +0x10de 0x031d NV_30 "NV31GLM" +0x10de 0x031e NV_30 "NV31GLM Pro" +0x10de 0x031f NV_30 "NV31GLM Pro" +0x10de 0x0320 NV_34 "GeForce FX 5200" +0x10de 0x0321 NV_34 "GeForce FX 5200 Ultra" +0x10de 0x0322 NV_34 "GeForce FX 5200" +0x10de 0x0323 NV_34 "GeForce FX 5200LE" +0x10de 0x0324 NV_34 "GeForce FX Go5200" +0x10de 0x0325 NV_34 "GeForce FX Go5250" +0x10de 0x0326 NV_34 "GeForce FX 5500" +0x10de 0x0327 NV_34 "GeForce FX 5100" +0x10de 0x0328 NV_34 "GeForce FX Go5200 32M/64M" +0x10de 0x0329 NV_34 "GeForce FX Go5200" +0x10de 0x032a NV_34 "Quadro NVS 280 PCI" +0x10de 0x032b NV_34 "Quadro FX 500/600 PCI" +0x10de 0x032c NV_34 "GeForce FX Go 5300" +0x10de 0x032d NV_34 "GeForce FX Go5100" +0x10de 0x032f NV_34 "NV34GL" +0x10de 0x0330 NV_30 "GeForce FX 5900 Ultra" +0x10de 0x0331 NV_30 "GeForce FX 5900" +0x10de 0x0332 NV_30 "GeForce FX 5900XT" +0x10de 0x0333 NV_30 "GeForce FX 5950 Ultra" +0x10de 0x0334 NV_30 "GeForce FX 5900ZT" +0x10de 0x0338 NV_30 "Quadro FX 3000" +0x10de 0x033f NV_30 "Quadro FX 700" +0x10de 0x0341 NV_30 "GeForce FX 5700 Ultra" +0x10de 0x0342 NV_30 "GeForce FX 5700" +0x10de 0x0343 NV_30 "GeForce FX 5700LE" +0x10de 0x0344 NV_30 "GeForce FX 5700VE" +0x10de 0x0345 NV_30 "NV36.5" +0x10de 0x0347 NV_30 "GeForce FX Go5700" +0x10de 0x0348 NV_30 "GeForce FX Go5700" +0x10de 0x0349 NV_30 "NV36M Pro" +0x10de 0x034b NV_30 "NV36MAP" +0x10de 0x034c NV_30 "Quadro FX Go1000" +0x10de 0x034e NV_30 "Quadro FX 1100" +0x10de 0x034f NV_30 "NV36GL" +0x10de 0x0391 NV_40 "GeForce 7600 GT" +0x10de 0x0392 NV_40 "GeForce 7600 GS" +0x10de 0x0393 NV_40 "GeForce 7300 GT" +0x10de 0x0398 NV_40 "GeForce Go 7600" +0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430" +0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405" +0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400" +0x10de 0x03d5 NV_44 "GeForce 6100 nForce 420" +0x12d2 0x0008 NV_03 "NV1" +0x12d2 0x0009 NV_03 "DAC64" +0x12d2 0x0018 NV_03 "Riva128" +0x12d2 0x0019 NV_03 "Riva128ZX" +0x12d2 0x0020 NV_04 "TNT" +0x12d2 0x0028 NV_04 "TNT2" +0x12d2 0x0029 NV_04 "UTNT2" +0x12d2 0x002c NV_04 "VTNT2" +0x12d2 0x00a0 NV_04 "ITNT2" + diff --git a/shared-core/drm_sarea.h b/shared-core/drm_sarea.h index 0d5baf69..43d1114f 100644 --- a/shared-core/drm_sarea.h +++ b/shared-core/drm_sarea.h @@ -41,7 +41,7 @@ #define SAREA_MAX 0x10000 /* 64kB */ #else /* Intel 830M driver needs at least 8k SAREA */ -#define SAREA_MAX 0x2000 +#define SAREA_MAX 0x2000UL #endif /** Maximum number of drawables in the SAREA */ diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 449747a2..cd7d2b43 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -264,7 +264,7 @@ static int 
i915_dma_init(DRM_IOCTL_ARGS) retcode = i915_dma_resume(dev); break; default: - retcode = -EINVAL; + retcode = DRM_ERR(EINVAL); break; } @@ -361,10 +361,9 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords) for (i = 0; i < dwords;) { int cmd, sz; - if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) { - + if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) return DRM_ERR(EINVAL); - } + if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) return DRM_ERR(EINVAL); @@ -396,7 +395,7 @@ static int i915_emit_box(drm_device_t * dev, RING_LOCALS; if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { - return EFAULT; + return DRM_ERR(EFAULT); } if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 85804ce7..98f58940 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -35,7 +35,7 @@ #define DRIVER_AUTHOR "Tungsten Graphics, Inc." -#define DRIVER_NAME "i915-mm" +#define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" #define DRIVER_DATE "20060929" @@ -48,9 +48,10 @@ * 1.5: Add vblank pipe configuration * 1.6: - New ioctl for scheduling buffer swaps on vertical blank * - Support vertical blank on secondary display pipe + * 1.8: New ioctl for ARB_Occlusion_Query */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 7 +#define DRIVER_MINOR 8 #define DRIVER_PATCHLEVEL 0 #if defined(__linux__) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index a48e1ff8..97723653 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -46,88 +46,167 @@ static void i915_vblank_tasklet(drm_device_t *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - struct list_head *list, *tmp; + struct list_head *list, *tmp, hits, *hit; + int nhits, nrects, slice[2], upper[2], lower[2], i; + unsigned counter[2] = { atomic_read(&dev->vbl_received), + atomic_read(&dev->vbl_received2) }; + drm_drawable_info_t *drw; + drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; + u32 cpp = dev_priv->cpp; + u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | + XY_SRC_COPY_BLT_WRITE_ALPHA | + XY_SRC_COPY_BLT_WRITE_RGB) + : XY_SRC_COPY_BLT_CMD; + u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) | + (cpp << 23) | (1 << 24); + RING_LOCALS; DRM_DEBUG("\n"); + INIT_LIST_HEAD(&hits); + + nhits = nrects = 0; + spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + /* Find buffer swaps scheduled for this vertical blank */ list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { drm_i915_vbl_swap_t *vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); - atomic_t *counter = vbl_swap->pipe ? &dev->vbl_received2 : - &dev->vbl_received; - - if ((atomic_read(counter) - vbl_swap->sequence) <= (1<<23)) { - drm_drawable_info_t *drw; - - spin_unlock(&dev_priv->swaps_lock); - - spin_lock(&dev->drw_lock); - - drw = drm_get_drawable_info(dev, vbl_swap->drw_id); - - if (drw) { - int i, num_rects = drw->num_rects; - drm_clip_rect_t *rect = drw->rects; - drm_i915_sarea_t *sarea_priv = - dev_priv->sarea_priv; - u32 cpp = dev_priv->cpp; - u32 cmd = (cpp == 4) ? 
(XY_SRC_COPY_BLT_CMD | - XY_SRC_COPY_BLT_WRITE_ALPHA | - XY_SRC_COPY_BLT_WRITE_RGB) - : XY_SRC_COPY_BLT_CMD; - u32 pitchropcpp = (sarea_priv->pitch * cpp) | - (0xcc << 16) | (cpp << 23) | - (1 << 24); - RING_LOCALS; - - i915_kernel_lost_context(dev); - - BEGIN_LP_RING(6); - - OUT_RING(GFX_OP_DRAWRECT_INFO); - OUT_RING(0); - OUT_RING(0); - OUT_RING(sarea_priv->width | - sarea_priv->height << 16); - OUT_RING(sarea_priv->width | - sarea_priv->height << 16); - OUT_RING(0); - ADVANCE_LP_RING(); + if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) + continue; - sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; + list_del(list); + dev_priv->swaps_pending--; - for (i = 0; i < num_rects; i++, rect++) { - BEGIN_LP_RING(8); + spin_unlock(&dev_priv->swaps_lock); + spin_lock(&dev->drw_lock); - OUT_RING(cmd); - OUT_RING(pitchropcpp); - OUT_RING((rect->y1 << 16) | rect->x1); - OUT_RING((rect->y2 << 16) | rect->x2); - OUT_RING(sarea_priv->front_offset); - OUT_RING((rect->y1 << 16) | rect->x1); - OUT_RING(pitchropcpp & 0xffff); - OUT_RING(sarea_priv->back_offset); + drw = drm_get_drawable_info(dev, vbl_swap->drw_id); - ADVANCE_LP_RING(); - } + if (!drw) { + spin_unlock(&dev->drw_lock); + drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); + spin_lock(&dev_priv->swaps_lock); + continue; + } + + list_for_each(hit, &hits) { + drm_i915_vbl_swap_t *swap_cmp = + list_entry(hit, drm_i915_vbl_swap_t, head); + drm_drawable_info_t *drw_cmp = + drm_get_drawable_info(dev, swap_cmp->drw_id); + + if (drw_cmp && + drw_cmp->rects[0].y1 > drw->rects[0].y1) { + list_add_tail(list, hit); + break; } + } - spin_unlock(&dev->drw_lock); + spin_unlock(&dev->drw_lock); - spin_lock(&dev_priv->swaps_lock); + /* List of hits was empty, or we reached the end of it */ + if (hit == &hits) + list_add_tail(list, hits.prev); - list_del(list); + nhits++; - drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); + spin_lock(&dev_priv->swaps_lock); + } + + if (nhits == 0) { + spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + return; + } + + spin_unlock(&dev_priv->swaps_lock); + + i915_kernel_lost_context(dev); - dev_priv->swaps_pending--; + BEGIN_LP_RING(6); + + OUT_RING(GFX_OP_DRAWRECT_INFO); + OUT_RING(0); + OUT_RING(0); + OUT_RING(sarea_priv->width | sarea_priv->height << 16); + OUT_RING(sarea_priv->width | sarea_priv->height << 16); + OUT_RING(0); + + ADVANCE_LP_RING(); + + sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; + + upper[0] = upper[1] = 0; + slice[0] = max(sarea_priv->pipeA_h / nhits, 1); + slice[1] = max(sarea_priv->pipeB_h / nhits, 1); + lower[0] = sarea_priv->pipeA_y + slice[0]; + lower[1] = sarea_priv->pipeB_y + slice[0]; + + spin_lock(&dev->drw_lock); + + /* Emit blits for buffer swaps, partitioning both outputs into as many + * slices as there are buffer swaps scheduled in order to avoid tearing + * (based on the assumption that a single buffer swap would always + * complete before scanout starts). 
+ */ + for (i = 0; i++ < nhits; + upper[0] = lower[0], lower[0] += slice[0], + upper[1] = lower[1], lower[1] += slice[1]) { + if (i == nhits) + lower[0] = lower[1] = sarea_priv->height; + + list_for_each(hit, &hits) { + drm_i915_vbl_swap_t *swap_hit = + list_entry(hit, drm_i915_vbl_swap_t, head); + drm_clip_rect_t *rect; + int num_rects, pipe; + unsigned short top, bottom; + + drw = drm_get_drawable_info(dev, swap_hit->drw_id); + + if (!drw) + continue; + + rect = drw->rects; + pipe = swap_hit->pipe; + top = upper[pipe]; + bottom = lower[pipe]; + + for (num_rects = drw->num_rects; num_rects--; rect++) { + int y1 = max(rect->y1, top); + int y2 = min(rect->y2, bottom); + + if (y1 >= y2) + continue; + + BEGIN_LP_RING(8); + + OUT_RING(cmd); + OUT_RING(pitchropcpp); + OUT_RING((y1 << 16) | rect->x1); + OUT_RING((y2 << 16) | rect->x2); + OUT_RING(sarea_priv->front_offset); + OUT_RING((y1 << 16) | rect->x1); + OUT_RING(pitchropcpp & 0xffff); + OUT_RING(sarea_priv->back_offset); + + ADVANCE_LP_RING(); + } } } - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + + list_for_each_safe(hit, tmp, &hits) { + drm_i915_vbl_swap_t *swap_hit = + list_entry(hit, drm_i915_vbl_swap_t, head); + + list_del(hit); + + drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER); + } } irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) @@ -453,7 +532,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) if (!drm_get_drawable_info(dev, swap.drawable)) { spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_ERROR("Invalid drawable ID %d\n", swap.drawable); + DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); return DRM_ERR(EINVAL); } diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h new file mode 100644 index 00000000..3f363192 --- /dev/null +++ b/shared-core/nouveau_drm.h @@ -0,0 +1,152 @@ +/* + * Copyright 2005 Stephane Marchesin. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NOUVEAU_DRM_H__ +#define __NOUVEAU_DRM_H__ + +typedef struct drm_nouveau_fifo_alloc { + int channel; + uint32_t put_base; + /* FIFO control regs */ + drm_handle_t ctrl; + int ctrl_size; + /* DMA command buffer */ + drm_handle_t cmdbuf; + int cmdbuf_size; +} +drm_nouveau_fifo_alloc_t; + +#define NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND 0x1 +#define NV_DMA_CONTEXT_FLAGS_PATCH_SRCCOPY 0x2 +#define NV_DMA_CONTEXT_FLAGS_CLIP_ENABLE 0x4 +#define NV_DMA_CONTEXT_FLAGS_MONO 0x8 + +typedef struct drm_nouveau_object_init { + uint32_t handle; + int class; + uint32_t flags; + /* these are object handles */ + uint32_t dma0; + uint32_t dma1; + uint32_t dma_notifier; +} +drm_nouveau_object_init_t; + +typedef struct drm_nouveau_dma_object_init { + uint32_t handle; + int access; + int target; + uint32_t offset; + int size; +} +drm_nouveau_dma_object_init_t; + +#define NOUVEAU_MEM_FB 0x00000001 +#define NOUVEAU_MEM_AGP 0x00000002 +#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004 +#define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008 +#define NOUVEAU_MEM_PINNED 0x00000010 +#define NOUVEAU_MEM_USER_BACKED 0x00000020 +#define NOUVEAU_MEM_MAPPED 0x00000040 +#define NOUVEAU_MEM_INSTANCE 0x00000080 /* internal */ + +typedef struct drm_nouveau_mem_alloc { + int flags; + int alignment; + uint64_t size; // in bytes + uint64_t region_offset; +} +drm_nouveau_mem_alloc_t; + +typedef struct drm_nouveau_mem_free { + int flags; + uint64_t region_offset; +} +drm_nouveau_mem_free_t; + +/* FIXME : maybe unify {GET,SET}PARAMs */ +#define NOUVEAU_GETPARAM_PCI_VENDOR 3 +#define NOUVEAU_GETPARAM_PCI_DEVICE 4 +#define NOUVEAU_GETPARAM_BUS_TYPE 5 +#define NOUVEAU_GETPARAM_FB_PHYSICAL 6 +#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 +typedef struct drm_nouveau_getparam { + unsigned int param; + uint64_t value; +} +drm_nouveau_getparam_t; + +#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1 +#define NOUVEAU_SETPARAM_CMDBUF_SIZE 2 +typedef struct drm_nouveau_setparam { + unsigned int param; + unsigned int value; +} +drm_nouveau_setparam_t; + +enum nouveau_card_type { + NV_UNKNOWN =0, + NV_01 =1, + NV_03 =3, + NV_04 =4, + NV_05 =5, + NV_10 =10, + NV_11 =10, + NV_15 =10, + NV_17 =10, + NV_20 =20, + NV_25 =20, + NV_30 =30, + NV_34 =30, + NV_40 =40, + NV_44 =44, + NV_50 =50, + NV_LAST =0xffff, +}; + +enum nouveau_bus_type { + NV_AGP =0, + NV_PCI =1, + NV_PCIE =2, +}; + +#define NOUVEAU_MAX_SAREA_CLIPRECTS 16 + +typedef struct drm_nouveau_sarea { + /* the cliprects */ + drm_clip_rect_t boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; + unsigned int nbox; +} +drm_nouveau_sarea_t; + +#define DRM_NOUVEAU_FIFO_ALLOC 0x00 +#define DRM_NOUVEAU_OBJECT_INIT 0x01 +#define DRM_NOUVEAU_DMA_OBJECT_INIT 0x02 // We don't want this eventually.. +#define DRM_NOUVEAU_MEM_ALLOC 0x03 +#define DRM_NOUVEAU_MEM_FREE 0x04 +#define DRM_NOUVEAU_GETPARAM 0x05 +#define DRM_NOUVEAU_SETPARAM 0x06 + +#endif /* __NOUVEAU_DRM_H__ */ + diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h new file mode 100644 index 00000000..6b09046c --- /dev/null +++ b/shared-core/nouveau_drv.h @@ -0,0 +1,222 @@ +/* + * Copyright 2005 Stephane Marchesin. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NOUVEAU_DRV_H__ +#define __NOUVEAU_DRV_H__ + +#define DRIVER_AUTHOR "Stephane Marchesin" +#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net" + +#define DRIVER_NAME "nouveau" +#define DRIVER_DESC "nVidia Riva/TNT/GeForce" +#define DRIVER_DATE "20060213" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 2 + +#define NOUVEAU_FAMILY 0x0000FFFF +#define NOUVEAU_FLAGS 0xFFFF0000 + +#include "nouveau_drm.h" +#include "nouveau_reg.h" + +struct mem_block { + struct mem_block *next; + struct mem_block *prev; + uint64_t start; + uint64_t size; + DRMFILE filp; /* 0: free, -1: heap, other: real files */ + int flags; + drm_local_map_t *map; +}; + +enum nouveau_flags { + NV_NFORCE =0x10000000, + NV_NFORCE2 =0x20000000 +}; + +struct nouveau_object +{ + struct nouveau_object *next; + struct nouveau_object *prev; + + struct mem_block *instance; + uint32_t ht_loc; + + uint32_t handle; + int class; + int engine; +}; + +#define NV_DMA_TARGET_VIDMEM 0 +#define NV_DMA_TARGET_PCI 2 +#define NV_DMA_TARGET_AGP 3 +struct nouveau_fifo +{ + int used; + /* owner of this fifo */ + DRMFILE filp; + /* mapping of the fifo itself */ + drm_local_map_t *map; + /* mapping of the regs controling the fifo */ + drm_local_map_t *regs; + /* dma object for the command buffer itself */ + struct mem_block *cmdbuf_mem; + struct nouveau_object *cmdbuf_obj; + /* PGRAPH context, for cards that keep it in RAMIN */ + struct mem_block *ramin_grctx; + /* objects belonging to this fifo */ + struct nouveau_object *objs; + + /* XXX move this in PGRAPH struct */ + uint32_t pgraph_ctx_user; +}; + +struct nouveau_config { + struct { + int location; + int size; + } cmdbuf; +}; + +typedef struct drm_nouveau_private { + /* the card type, takes NV_* as values */ + int card_type; + int flags; + + drm_local_map_t *mmio; + drm_local_map_t *fb; + drm_local_map_t *ramin; /* NV40 onwards */ + + //TODO: Remove me, I'm bogus :) + int cur_fifo; + + struct nouveau_object *fb_obj; + int cmdbuf_ch_size; + struct mem_block* cmdbuf_alloc; + + int fifo_alloc_count; + struct nouveau_fifo fifos[NV_MAX_FIFO_NUMBER]; + + /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ + uint32_t ramin_size; + uint32_t ramht_offset; + uint32_t ramht_size; + uint32_t ramht_bits; + uint32_t ramfc_offset; + uint32_t ramfc_size; + uint32_t ramro_offset; + uint32_t ramro_size; + + /* base physical adresses */ + uint64_t 
fb_phys; + uint64_t agp_phys; + + /* the mtrr covering the FB */ + int fb_mtrr; + + struct mem_block *agp_heap; + struct mem_block *fb_heap; + struct mem_block *fb_nomap_heap; + struct mem_block *ramin_heap; + + struct nouveau_config config; +} +drm_nouveau_private_t; + +/* nouveau_state.c */ +extern void nouveau_preclose(drm_device_t * dev, DRMFILE filp); +extern int nouveau_load(struct drm_device *dev, unsigned long flags); +extern int nouveau_firstopen(struct drm_device *dev); +extern void nouveau_lastclose(struct drm_device *dev); +extern int nouveau_unload(struct drm_device *dev); +extern int nouveau_ioctl_getparam(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_setparam(DRM_IOCTL_ARGS); +extern void nouveau_wait_for_idle(struct drm_device *dev); + +/* nouveau_mem.c */ +extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev); +extern void nouveau_mem_release(DRMFILE filp, struct mem_block *heap); +extern int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS); +extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp); +extern void nouveau_mem_free(struct drm_device* dev, struct mem_block*); +extern int nouveau_mem_init(struct drm_device *dev); +extern void nouveau_mem_close(struct drm_device *dev); +extern int nouveau_instmem_init(struct drm_device *dev, + uint32_t offset); +extern struct mem_block* nouveau_instmem_alloc(struct drm_device *dev, + uint32_t size, uint32_t align); +extern void nouveau_instmem_free(struct drm_device *dev, + struct mem_block *block); +extern uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv, + struct mem_block *mem, int index); +extern void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv, + struct mem_block *mem, int index, + uint32_t val); + +/* nouveau_fifo.c */ +extern int nouveau_fifo_init(drm_device_t *dev); +extern int nouveau_fifo_number(drm_device_t *dev); +extern void nouveau_fifo_cleanup(drm_device_t *dev, DRMFILE filp); +extern int nouveau_fifo_id_get(drm_device_t *dev, DRMFILE filp); +extern void nouveau_fifo_free(drm_device_t *dev, int channel); + +/* nouveau_object.c */ +extern void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp); +extern struct nouveau_object *nouveau_dma_object_create(drm_device_t *dev, + uint32_t offset, uint32_t size, int access, uint32_t target); +extern int nouveau_ioctl_object_init(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS); +extern uint32_t nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem); + +/* nouveau_irq.c */ +extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); +extern void nouveau_irq_preinstall(drm_device_t*); +extern void nouveau_irq_postinstall(drm_device_t*); +extern void nouveau_irq_uninstall(drm_device_t*); + +/* nv40_graph.c */ +extern int nv40_graph_init(drm_device_t *dev); +extern int nv40_graph_context_create(drm_device_t *dev, int channel); +extern void nv40_graph_context_save_current(drm_device_t *dev); +extern void nv40_graph_context_restore(drm_device_t *dev, int channel); + +extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + +#if defined(__powerpc__) +#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) ) +#define NV_WRITE(reg,val) out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) ) +#else +#define NV_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) +#define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) +#endif + +#define 
INSTANCE_WR(mem,ofs,val) nouveau_instmem_w32(dev_priv,(mem),(ofs),(val)) +#define INSTANCE_RD(mem,ofs) nouveau_instmem_r32(dev_priv,(mem),(ofs)) + +#endif /* __NOUVEAU_DRV_H__ */ + diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c new file mode 100644 index 00000000..e5f825e6 --- /dev/null +++ b/shared-core/nouveau_fifo.c @@ -0,0 +1,674 @@ +/* + * Copyright 2005-2006 Stephane Marchesin + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + + +/* returns the number of hw fifos */ +int nouveau_fifo_number(drm_device_t* dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + switch(dev_priv->card_type) + { + case NV_03: + return 8; + case NV_04: + case NV_05: + return 16; + default: + return 32; + } +} + +/* returns the size of fifo context */ +static int nouveau_fifo_ctx_size(drm_device_t* dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + + if (dev_priv->card_type >= NV_40) + return 128; + else if (dev_priv->card_type >= NV_10) + return 64; + else + return 32; +} + +/*********************************** + * functions doing the actual work + ***********************************/ + +/* voir nv_xaa.c : NVResetGraphics + * mémoire mappée par nv_driver.c : NVMapMem + * voir nv_driver.c : NVPreInit + */ + +static int nouveau_fifo_instmem_configure(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + /* Clear start of RAMIN, enough to cover RAMFC/HT/RO basically */ + for (i=0x00710000; i<0x00730000; i++) + NV_WRITE(i, 0x00000000); + + /* FIFO hash table (RAMHT) + * use 4k hash table at RAMIN+0x10000 + * TODO: extend the hash table + */ + dev_priv->ramht_offset = 0x10000; + dev_priv->ramht_bits = 9; + dev_priv->ramht_size = (1 << dev_priv->ramht_bits); + NV_WRITE(NV_PFIFO_RAMHT, + (0x03 << 24) /* search 128 */ | + ((dev_priv->ramht_bits - 9) << 16) | + (dev_priv->ramht_offset >> 8) + ); + DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", + dev_priv->ramht_offset, + dev_priv->ramht_size); + + /* FIFO runout table (RAMRO) - 512k at 0x11200 */ + dev_priv->ramro_offset = 0x11200; + dev_priv->ramro_size = 512; + NV_WRITE(NV_PFIFO_RAMRO, dev_priv->ramro_offset>>8); + DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", + dev_priv->ramro_offset, + dev_priv->ramro_size); + + /* FIFO context table (RAMFC) + * NV40 : Not sure exactly how to position RAMFC on some cards, + * 
0x30002 seems to position it at RAMIN+0x20000 on these + * cards. RAMFC is 4kb (32 fifos, 128byte entries). + * Others: Position RAMFC at RAMIN+0x11400 + */ + switch(dev_priv->card_type) + { + case NV_50: + case NV_40: + dev_priv->ramfc_offset = 0x20000; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev); + NV_WRITE(NV40_PFIFO_RAMFC, 0x30002); + break; + case NV_44: + dev_priv->ramfc_offset = 0x20000; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev); + NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) | + (2 << 16)); + break; + case NV_30: + case NV_20: + case NV_10: + dev_priv->ramfc_offset = 0x11400; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev); + NV_WRITE(NV_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) | + (1 << 16) /* 64 Bytes entry*/); + break; + case NV_04: + case NV_03: + dev_priv->ramfc_offset = 0x11400; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev); + NV_WRITE(NV_PFIFO_RAMFC, dev_priv->ramfc_offset>>8); + break; + } + DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", + dev_priv->ramfc_offset, + dev_priv->ramfc_size); + + if (nouveau_instmem_init(dev, dev_priv->ramfc_offset + + dev_priv->ramfc_size)) + return 1; + + return 0; +} + +int nouveau_fifo_init(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int ret; + + NV_WRITE(NV_PFIFO_CACHES, 0x00000000); + + ret = nouveau_fifo_instmem_configure(dev); + if (ret) { + DRM_ERROR("Failed to configure instance memory\n"); + return ret; + } + + /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */ + + DRM_DEBUG("Setting defaults for remaining PFIFO regs\n"); + + /* All channels into PIO mode */ + NV_WRITE(NV_PFIFO_MODE, 0x00000000); + + NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); + /* Channel 0 active, PIO mode */ + NV_WRITE(NV_PFIFO_CACH1_PSH1, 0x00000000); + /* PUT and GET to 0 */ + NV_WRITE(NV_PFIFO_CACH1_DMAP, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_DMAP, 0x00000000); + /* No cmdbuf object */ + NV_WRITE(NV_PFIFO_CACH1_DMAI, 0x00000000); + NV_WRITE(NV_PFIFO_CACH0_PSH0, 0x00000000); + NV_WRITE(NV_PFIFO_CACH0_PUL0, 0x00000000); + NV_WRITE(NV_PFIFO_SIZE, 0x0000FFFF); + NV_WRITE(NV_PFIFO_CACH1_HASH, 0x0000FFFF); + NV_WRITE(NV_PFIFO_CACH0_PUL1, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_DMAC, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_DMAS, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_ENG, 0x00000000); +#ifdef __BIG_ENDIAN + NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_4 | + NV_PFIFO_CACH1_BIG_ENDIAN); +#else + NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_4); +#endif + NV_WRITE(NV_PFIFO_CACH1_DMAPSH, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); + + NV_WRITE(NV_PGRAPH_CTX_USER, 0x0); + NV_WRITE(NV_PFIFO_DELAY_0, 0xff /* retrycount*/ ); + if (dev_priv->card_type >= NV_40) + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x00002001); + else + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10110000); + + NV_WRITE(NV_PFIFO_DMA_TIMESLICE, 0x001fffff); + NV_WRITE(NV_PFIFO_CACHES, 0x00000001); + + return 0; +} + +static int +nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_config 
*config = &dev_priv->config; + struct mem_block *cb; + struct nouveau_object *cb_dma = NULL; + int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); + + /* Defaults for unconfigured values */ + if (!config->cmdbuf.location) + config->cmdbuf.location = NOUVEAU_MEM_FB; + if (!config->cmdbuf.size || config->cmdbuf.size < cb_min_size) + config->cmdbuf.size = cb_min_size; + + cb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size, + config->cmdbuf.location | NOUVEAU_MEM_MAPPED, + (DRMFILE)-2); + if (!cb) { + DRM_ERROR("Couldn't allocate DMA command buffer.\n"); + return DRM_ERR(ENOMEM); + } + + if (cb->flags & NOUVEAU_MEM_AGP) { + cb_dma = nouveau_dma_object_create(dev, + cb->start, cb->size, + NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP); + } else if (dev_priv->card_type != NV_04) { + cb_dma = nouveau_dma_object_create(dev, + cb->start - drm_get_resource_start(dev, 1), + cb->size, + NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM); + } else { + /* NV04 cmdbuf hack, from original ddx.. not sure of it's + * exact reason for existing :) PCI access to cmdbuf in + * VRAM. + */ + cb_dma = nouveau_dma_object_create(dev, + cb->start, cb->size, + NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI); + } + + if (!cb_dma) { + nouveau_mem_free(dev, cb); + DRM_ERROR("Failed to alloc DMA object for command buffer\n"); + return DRM_ERR(ENOMEM); + } + + dev_priv->fifos[channel].cmdbuf_mem = cb; + dev_priv->fifos[channel].cmdbuf_obj = cb_dma; + return 0; +} + +#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV04_RAMFC_##offset, (val)) +static void nouveau_nv04_context_init(drm_device_t *dev, + drm_nouveau_fifo_alloc_t *init) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_object *cb_obj; + uint32_t fifoctx, ctx_size = 32; + int i; + + cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj; + + fifoctx=NV_RAMIN+dev_priv->ramfc_offset+init->channel*ctx_size; + // clear the fifo context + for(i=0;i<ctx_size/4;i++) + NV_WRITE(fifoctx+4*i,0x0); + + RAMFC_WR(DMA_PUT , init->put_base); + RAMFC_WR(DMA_GET , init->put_base); + RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev, cb_obj->instance)); +#ifdef __BIG_ENDIAN + RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4|NV_PFIFO_CACH1_BIG_ENDIAN); +#else + RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4); +#endif +} +#undef RAMFC_WR + +#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV10_RAMFC_##offset, (val)) +static void nouveau_nv10_context_init(drm_device_t *dev, + drm_nouveau_fifo_alloc_t *init) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_object *cb_obj; + uint32_t fifoctx; + int i; + + cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj; + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + init->channel*64; + for (i=0;i<64;i+=4) + NV_WRITE(fifoctx + i, 0); + + /* Fill entries that are seen filled in dumps of nvidia driver just + * after channel's is put into DMA mode + */ + RAMFC_WR(DMA_PUT , init->put_base); + RAMFC_WR(DMA_GET , init->put_base); + RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev, + cb_obj->instance)); +#ifdef __BIG_ENDIAN + RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_4 | + NV_PFIFO_CACH1_BIG_ENDIAN); +#else + RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_4); +#endif + RAMFC_WR(DMA_SUBROUTINE, 0); +} + +static void 
nouveau_nv10_context_save(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + int channel; + + channel = NV_READ(NV_PFIFO_CACH1_PSH1) & (nouveau_fifo_number(dev)-1); + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; + + RAMFC_WR(DMA_PUT , NV_READ(NV_PFIFO_CACH1_DMAP)); + RAMFC_WR(DMA_GET , NV_READ(NV_PFIFO_CACH1_DMAG)); + RAMFC_WR(REF_CNT , NV_READ(NV_PFIFO_CACH1_REF_CNT)); + RAMFC_WR(DMA_INSTANCE , NV_READ(NV_PFIFO_CACH1_DMAI)); + RAMFC_WR(DMA_STATE , NV_READ(NV_PFIFO_CACH1_DMAS)); + RAMFC_WR(DMA_FETCH , NV_READ(NV_PFIFO_CACH1_DMAF)); + RAMFC_WR(ENGINE , NV_READ(NV_PFIFO_CACH1_ENG)); + RAMFC_WR(PULL1_ENGINE , NV_READ(NV_PFIFO_CACH1_PUL1)); + RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV_PFIFO_CACH1_ACQUIRE_VALUE)); + RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMESTAMP)); + RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMEOUT)); + RAMFC_WR(SEMAPHORE , NV_READ(NV_PFIFO_CACH1_SEMAPHORE)); + RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV_PFIFO_CACH1_DMASR)); +} +#undef RAMFC_WR + +#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val)) +static void nouveau_nv40_context_init(drm_device_t *dev, + drm_nouveau_fifo_alloc_t *init) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[init->channel]; + uint32_t fifoctx, cb_inst, grctx_inst; + int i; + + cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); + grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + init->channel*128; + for (i=0;i<128;i+=4) + NV_WRITE(fifoctx + i, 0); + + /* Fill entries that are seen filled in dumps of nvidia driver just + * after channel's is put into DMA mode + */ + RAMFC_WR(DMA_PUT , init->put_base); + RAMFC_WR(DMA_GET , init->put_base); + RAMFC_WR(DMA_INSTANCE , cb_inst); + RAMFC_WR(DMA_FETCH , NV_PFIFO_CACH1_DMAF_TRIG_128_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACH1_BIG_ENDIAN | +#endif + 0x30000000 /* no idea.. 
*/); + RAMFC_WR(DMA_SUBROUTINE, init->put_base); + RAMFC_WR(GRCTX_INSTANCE, grctx_inst); + RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); +} + +static void nouveau_nv40_context_save(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + int channel; + + channel = NV_READ(NV_PFIFO_CACH1_PSH1) & (nouveau_fifo_number(dev)-1); + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + + RAMFC_WR(DMA_PUT , NV_READ(NV_PFIFO_CACH1_DMAP)); + RAMFC_WR(DMA_GET , NV_READ(NV_PFIFO_CACH1_DMAG)); + RAMFC_WR(REF_CNT , NV_READ(NV_PFIFO_CACH1_REF_CNT)); + RAMFC_WR(DMA_INSTANCE , NV_READ(NV_PFIFO_CACH1_DMAI)); + RAMFC_WR(DMA_DCOUNT , NV_READ(NV_PFIFO_CACH1_DMA_DCOUNT)); + RAMFC_WR(DMA_STATE , NV_READ(NV_PFIFO_CACH1_DMAS)); + RAMFC_WR(DMA_FETCH , NV_READ(NV_PFIFO_CACH1_DMAF)); + RAMFC_WR(ENGINE , NV_READ(NV_PFIFO_CACH1_ENG)); + RAMFC_WR(PULL1_ENGINE , NV_READ(NV_PFIFO_CACH1_PUL1)); + RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV_PFIFO_CACH1_ACQUIRE_VALUE)); + RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMESTAMP)); + RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMEOUT)); + RAMFC_WR(SEMAPHORE , NV_READ(NV_PFIFO_CACH1_SEMAPHORE)); + RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV_PFIFO_CACH1_DMAG)); + RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE)); + RAMFC_WR(DMA_TIMESLICE , NV_READ(NV_PFIFO_DMA_TIMESLICE) & 0x1FFFF); + RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4)); +} +#undef RAMFC_WR + +/* This function should load values from RAMFC into PFIFO, but for now + * it just clobbers PFIFO with what nouveau_fifo_alloc used to setup + * unconditionally. + */ +static void +nouveau_fifo_context_restore(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t cb_inst; + + cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); + + // FIXME check if we need to refill the time quota with something like NV_WRITE(0x204C, 0x0003FFFF); + + if (dev_priv->card_type >= NV_40) + NV_WRITE(NV_PFIFO_CACH1_PSH1, 0x00010000|channel); + else + NV_WRITE(NV_PFIFO_CACH1_PSH1, 0x00000100|channel); + + NV_WRITE(NV_PFIFO_CACH1_DMAP, 0 /*RAMFC_DMA_PUT*/); + NV_WRITE(NV_PFIFO_CACH1_DMAG, 0 /*RAMFC_DMA_GET*/); + NV_WRITE(NV_PFIFO_CACH1_DMAI, cb_inst); + NV_WRITE(NV_PFIFO_SIZE , 0x0000FFFF); + NV_WRITE(NV_PFIFO_CACH1_HASH, 0x0000FFFF); + + NV_WRITE(NV_PFIFO_CACH0_PUL1, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_DMAC, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_DMAS, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_ENG, 0x00000000); +#ifdef __BIG_ENDIAN + NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4|NV_PFIFO_CACH1_BIG_ENDIAN); +#else + NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4); +#endif +} + +/* allocates and initializes a fifo for user space consumption */ +static int nouveau_fifo_alloc(drm_device_t* dev,drm_nouveau_fifo_alloc_t* init, DRMFILE filp) +{ + int i; + int ret; + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_object *cb_obj; + + /* + * Alright, here is the full story + * Nvidia cards have multiple hw fifo contexts (praise them for that, + * no complicated crash-prone context switches) + * We allocate a new context for each app and let it write to it directly + * (woo, full userspace command submission !) 
+ * When there are no more contexts, you lost + */ + for(i=0;i<nouveau_fifo_number(dev);i++) + if (dev_priv->fifos[i].used==0) + break; + + DRM_INFO("Allocating FIFO number %d\n", i); + /* no more fifos. you lost. */ + if (i==nouveau_fifo_number(dev)) + return DRM_ERR(EINVAL); + + /* allocate a command buffer, and create a dma object for the gpu */ + ret = nouveau_fifo_cmdbuf_alloc(dev, i); + if (ret) return ret; + cb_obj = dev_priv->fifos[i].cmdbuf_obj; + + /* that fifo is used */ + dev_priv->fifos[i].used=1; + dev_priv->fifos[i].filp=filp; + + init->channel = i; + init->put_base = 0; + dev_priv->cur_fifo = init->channel; + dev_priv->fifos[i].pgraph_ctx_user = i << 24; + + nouveau_wait_for_idle(dev); + + /* disable the fifo caches */ + NV_WRITE(NV_PFIFO_CACHES, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_DMAPSH, NV_READ(NV_PFIFO_CACH1_DMAPSH)&(~0x1)); + NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); + + /* Construct inital RAMFC for new channel */ + if (dev_priv->card_type < NV_10) { + nouveau_nv04_context_init(dev, init); + } else if (dev_priv->card_type < NV_40) { + nouveau_nv10_context_init(dev, init); + } else { + ret = nv40_graph_context_create(dev, init->channel); + if (ret) { + nouveau_fifo_free(dev, init->channel); + return ret; + } + nouveau_nv40_context_init(dev, init); + } + + /* enable the fifo dma operation */ + NV_WRITE(NV_PFIFO_MODE,NV_READ(NV_PFIFO_MODE)|(1<<init->channel)); + + /* setup channel's default get/put values */ + NV_WRITE(NV03_FIFO_REGS_DMAPUT(init->channel), init->put_base); + NV_WRITE(NV03_FIFO_REGS_DMAGET(init->channel), init->put_base); + + /* If this is the first channel, setup PFIFO ourselves. For any + * other case, the GPU will handle this when it switches contexts. + */ + if (dev_priv->fifo_alloc_count == 0) { + nouveau_fifo_context_restore(dev, init->channel); + if (dev_priv->card_type >= NV_40) { + struct nouveau_fifo *chan; + uint32_t inst; + + chan = &dev_priv->fifos[init->channel]; + inst = nouveau_chip_instance_get(dev, + chan->ramin_grctx); + + /* see comments in nv40_graph_context_restore() */ + NV_WRITE(0x400784, inst); + NV_WRITE(0x40032C, inst | 0x01000000); + NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst); + } + } + + NV_WRITE(NV_PFIFO_CACH1_DMAPSH, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); + + /* reenable the fifo caches */ + NV_WRITE(NV_PFIFO_CACHES, 0x00000001); + + /* make the fifo available to user space */ + /* first, the fifo control regs */ + init->ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init->channel); + init->ctrl_size = NV03_FIFO_REGS_SIZE; + ret = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS, + 0, &dev_priv->fifos[init->channel].regs); + if (ret != 0) + return ret; + + /* pass back FIFO map info to the caller */ + init->cmdbuf = dev_priv->fifos[init->channel].cmdbuf_mem->start; + init->cmdbuf_size = dev_priv->fifos[init->channel].cmdbuf_mem->size; + + /* FIFO has no objects yet */ + dev_priv->fifos[init->channel].objs = NULL; + dev_priv->fifo_alloc_count++; + + DRM_INFO("%s: initialised FIFO %d\n", __func__, init->channel); + return 0; +} + +/* stops a fifo */ +void nouveau_fifo_free(drm_device_t* dev,int n) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + int ctx_size = nouveau_fifo_ctx_size(dev); + + dev_priv->fifos[n].used=0; + DRM_INFO("%s: freeing fifo %d\n", __func__, n); + + /* disable the fifo caches */ + NV_WRITE(NV_PFIFO_CACHES, 0x00000000); + 
+ NV_WRITE(NV_PFIFO_MODE,NV_READ(NV_PFIFO_MODE)&~(1<<n)); + // FIXME XXX needs more code + + /* Clean RAMFC */ + for (i=0;i<ctx_size;i+=4) { + DRM_DEBUG("RAMFC +%02x: 0x%08x\n", i, NV_READ(NV_RAMIN + + dev_priv->ramfc_offset + n*ctx_size + i)); + NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset + n*ctx_size + i, 0); + } + + if (dev_priv->card_type >= NV_40) + nouveau_instmem_free(dev, dev_priv->fifos[n].ramin_grctx); + + /* reenable the fifo caches */ + NV_WRITE(NV_PFIFO_CACHES, 0x00000001); + + /* Deallocate command buffer, and dma object */ + nouveau_mem_free(dev, dev_priv->fifos[n].cmdbuf_mem); + + dev_priv->fifo_alloc_count--; +} + +/* cleanups all the fifos from filp */ +void nouveau_fifo_cleanup(drm_device_t* dev, DRMFILE filp) +{ + int i; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("clearing FIFO enables from filp\n"); + for(i=0;i<nouveau_fifo_number(dev);i++) + if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp==filp) + nouveau_fifo_free(dev,i); + + /* check we still point at an active channel */ + if (dev_priv->fifos[dev_priv->cur_fifo].used == 0) { + DRM_DEBUG("%s: cur_fifo is no longer owned.\n", __func__); + for (i=0;i<nouveau_fifo_number(dev);i++) + if (dev_priv->fifos[i].used) break; + if (i==nouveau_fifo_number(dev)) + i=0; + DRM_DEBUG("%s: new cur_fifo is %d\n", __func__, i); + dev_priv->cur_fifo = i; + } + +/* if (dev_priv->cmdbuf_alloc) + nouveau_fifo_init(dev);*/ +} + +int nouveau_fifo_id_get(drm_device_t* dev, DRMFILE filp) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + int i; + + for(i=0;i<nouveau_fifo_number(dev);i++) + if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp == filp) + return i; + return -1; +} + +/*********************************** + * ioctls wrapping the functions + ***********************************/ + +static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_fifo_alloc_t init; + int res; + DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data, sizeof(init)); + + res=nouveau_fifo_alloc(dev,&init,filp); + if (!res) + DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data, init, sizeof(init)); + + return res; +} + +/*********************************** + * finally, the ioctl table + ***********************************/ + +drm_ioctl_desc_t nouveau_ioctls[] = { + [DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_OBJECT_INIT)] = {nouveau_ioctl_object_init, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_DMA_OBJECT_INIT)] = {nouveau_ioctl_dma_object_init, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_ALLOC)] = {nouveau_ioctl_mem_alloc, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_FREE)] = {nouveau_ioctl_mem_free, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_GETPARAM)] = {nouveau_ioctl_getparam, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_SETPARAM)] = {nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, +}; + +int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); + + diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c new file mode 100644 index 00000000..7a31fb0b --- /dev/null +++ b/shared-core/nouveau_irq.c @@ -0,0 +1,422 @@ +/* + * Copyright (C) 2006 Ben Skeggs. + * + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Ben Skeggs <darktama@iinet.net.au> + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" +#include "nouveau_reg.h" + +void nouveau_irq_preinstall(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("IRQ: preinst\n"); + + /* Disable/Clear PFIFO interrupts */ + NV_WRITE(NV_PFIFO_INTEN, 0); + NV_WRITE(NV_PFIFO_INTSTAT, 0xFFFFFFFF); + /* Disable/Clear PGRAPH interrupts */ + if (dev_priv->card_type<NV_40) + NV_WRITE(NV04_PGRAPH_INTEN, 0); + else + NV_WRITE(NV40_PGRAPH_INTEN, 0); + NV_WRITE(NV_PGRAPH_INTSTAT, 0xFFFFFFFF); +#if 0 + /* Disable/Clear CRTC0/1 interrupts */ + NV_WRITE(NV_CRTC0_INTEN, 0); + NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); + NV_WRITE(NV_CRTC1_INTEN, 0); + NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); +#endif + /* Master disable */ + NV_WRITE(NV_PMC_INTEN, 0); +} + +void nouveau_irq_postinstall(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("IRQ: postinst\n"); + + /* Enable PFIFO error reporting */ + NV_WRITE(NV_PFIFO_INTEN , + NV_PFIFO_INTR_CACHE_ERROR | + NV_PFIFO_INTR_RUNOUT | + NV_PFIFO_INTR_RUNOUT_OVERFLOW | + NV_PFIFO_INTR_DMA_PUSHER | + NV_PFIFO_INTR_DMA_PT | + NV_PFIFO_INTR_SEMAPHORE | + NV_PFIFO_INTR_ACQUIRE_TIMEOUT + ); + NV_WRITE(NV_PFIFO_INTSTAT, 0xFFFFFFFF); + + /* Enable PGRAPH interrupts */ + if (dev_priv->card_type<NV_40) + NV_WRITE(NV04_PGRAPH_INTEN, + NV_PGRAPH_INTR_NOTIFY | + NV_PGRAPH_INTR_MISSING_HW | + NV_PGRAPH_INTR_CONTEXT_SWITCH | + NV_PGRAPH_INTR_BUFFER_NOTIFY | + NV_PGRAPH_INTR_ERROR + ); + else + NV_WRITE(NV40_PGRAPH_INTEN, + NV_PGRAPH_INTR_NOTIFY | + NV_PGRAPH_INTR_MISSING_HW | + NV_PGRAPH_INTR_CONTEXT_SWITCH | + NV_PGRAPH_INTR_BUFFER_NOTIFY | + NV_PGRAPH_INTR_ERROR + ); + NV_WRITE(NV_PGRAPH_INTSTAT, 0xFFFFFFFF); + +#if 0 + /* Enable CRTC0/1 interrupts */ + NV_WRITE(NV_CRTC0_INTEN, NV_CRTC_INTR_VBLANK); + NV_WRITE(NV_CRTC1_INTEN, NV_CRTC_INTR_VBLANK); +#endif + + /* Master enable */ + NV_WRITE(NV_PMC_INTEN, NV_PMC_INTEN_MASTER_ENABLE); +} + +void nouveau_irq_uninstall(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("IRQ: uninst\n"); + + /* Disable PFIFO interrupts */ + NV_WRITE(NV_PFIFO_INTEN, 0); + /* Disable PGRAPH interrupts */ + if (dev_priv->card_type<NV_40) + NV_WRITE(NV04_PGRAPH_INTEN, 0); + else + 
NV_WRITE(NV40_PGRAPH_INTEN, 0); +#if 0 + /* Disable CRTC0/1 interrupts */ + NV_WRITE(NV_CRTC0_INTEN, 0); + NV_WRITE(NV_CRTC1_INTEN, 0); +#endif + /* Master disable */ + NV_WRITE(NV_PMC_INTEN, 0); +} + +static void nouveau_fifo_irq_handler(drm_device_t *dev) +{ + uint32_t status, chmode, chstat, channel; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + status = NV_READ(NV_PFIFO_INTSTAT); + if (!status) + return; + chmode = NV_READ(NV_PFIFO_MODE); + chstat = NV_READ(NV_PFIFO_DMA); + channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + + DRM_DEBUG("NV: PFIFO interrupt! Channel=%d, INTSTAT=0x%08x/MODE=0x%08x/PEND=0x%08x\n", channel, status, chmode, chstat); + + if (status & NV_PFIFO_INTR_CACHE_ERROR) { + uint32_t c1get, c1method, c1data; + + DRM_ERROR("NV: PFIFO error interrupt\n"); + + c1get = NV_READ(NV_PFIFO_CACH1_GET) >> 2; + if (dev_priv->card_type < NV_40) { + /* Untested, so it may not work.. */ + c1method = NV_READ(NV_PFIFO_CACH1_METHOD(c1get)); + c1data = NV_READ(NV_PFIFO_CACH1_DATA(c1get)); + } else { + c1method = NV_READ(NV40_PFIFO_CACH1_METHOD(c1get)); + c1data = NV_READ(NV40_PFIFO_CACH1_DATA(c1get)); + } + + DRM_ERROR("NV: Channel %d/%d - Method 0x%04x, Data 0x%08x\n", + channel, (c1method >> 13) & 7, + c1method & 0x1ffc, c1data + ); + + status &= ~NV_PFIFO_INTR_CACHE_ERROR; + NV_WRITE(NV_PFIFO_INTSTAT, NV_PFIFO_INTR_CACHE_ERROR); + } + + if (status & NV_PFIFO_INTR_DMA_PUSHER) { + DRM_INFO("NV: PFIFO DMA pusher interrupt\n"); + + status &= ~NV_PFIFO_INTR_DMA_PUSHER; + NV_WRITE(NV_PFIFO_INTSTAT, NV_PFIFO_INTR_DMA_PUSHER); + + NV_WRITE(NV_PFIFO_CACH1_DMAS, 0x00000000); + if (NV_READ(NV_PFIFO_CACH1_DMAP)!=NV_READ(NV_PFIFO_CACH1_DMAG)) + { + uint32_t getval=NV_READ(NV_PFIFO_CACH1_DMAG)+4; + NV_WRITE(NV_PFIFO_CACH1_DMAG,getval); + } + } + + if (status) { + DRM_INFO("NV: unknown PFIFO interrupt. 
status=0x%08x\n", status); + + NV_WRITE(NV_PFIFO_INTSTAT, status); + } + + NV_WRITE(NV_PMC_INTSTAT, NV_PMC_INTSTAT_PFIFO_PENDING); +} + +static void nouveau_nv04_context_switch(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t channel,i; + uint32_t max=0; + NV_WRITE(NV_PGRAPH_FIFO,0x0); + channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + //DRM_INFO("raw PFIFO_CACH1_PHS1 reg is %x\n",NV_READ(NV_PFIFO_CACH1_PSH1)); + //DRM_INFO("currently on channel %d\n",channel); + for (i=0;i<nouveau_fifo_number(dev);i++) + if ((dev_priv->fifos[i].used)&&(i!=channel)) { + uint32_t put,get,pending; + //put=NV_READ(dev_priv->ramfc_offset+i*32); + //get=NV_READ(dev_priv->ramfc_offset+4+i*32); + put=NV_READ(NV03_FIFO_REGS_DMAPUT(i)); + get=NV_READ(NV03_FIFO_REGS_DMAGET(i)); + pending=NV_READ(NV_PFIFO_DMA); + //DRM_INFO("Channel %d (put/get %x/%x)\n",i,put,get); + /* mark all pending channels as such */ + if ((put!=get)&!(pending&(1<<i))) + { + pending|=(1<<i); + NV_WRITE(NV_PFIFO_DMA,pending); + } + max++; + } + nouveau_wait_for_idle(dev); + +#if 1 + /* 2-channel commute */ + // NV_WRITE(NV_PFIFO_CACH1_PSH1,channel|0x100); + if (channel==0) + channel=1; + else + channel=0; + // dev_priv->cur_fifo=channel; + NV_WRITE(0x2050,channel|0x100); +#endif + //NV_WRITE(NV_PFIFO_CACH1_PSH1,max|0x100); + //NV_WRITE(0x2050,max|0x100); + + NV_WRITE(NV_PGRAPH_FIFO,0x1); + +} + +static void nouveau_nv10_context_switch(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int channel, channel_old; + + channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + channel_old = (NV_READ(NV_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + + DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); + + NV_WRITE(NV_PGRAPH_FIFO,0x0); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000); + NV_WRITE(NV_PFIFO_CACHES, 0x00000000); + + dev_priv->fifos[channel_old].pgraph_ctx_user = NV_READ(NV_PGRAPH_CTX_USER); + //XXX save PGRAPH context + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10000000); + NV_WRITE(NV_PGRAPH_CTX_USER, dev_priv->fifos[channel].pgraph_ctx_user); + //XXX restore PGRAPH context + printk("ctx_user %x %x\n", dev_priv->fifos[channel_old].pgraph_ctx_user, dev_priv->fifos[channel].pgraph_ctx_user); + + NV_WRITE(NV_PGRAPH_FFINTFC_ST2, NV_READ(NV_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10010100); + + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); + NV_WRITE(NV_PFIFO_CACHES, 0x00000001); + NV_WRITE(NV_PGRAPH_FIFO,0x1); +} + +static void nouveau_pgraph_irq_handler(drm_device_t *dev) +{ + uint32_t status; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + status = NV_READ(NV_PGRAPH_INTSTAT); + if (!status) + return; + + if (status & NV_PGRAPH_INTR_NOTIFY) { + uint32_t nsource, nstatus, instance, notify; + DRM_DEBUG("NV: PGRAPH notify interrupt\n"); + + nstatus = NV_READ(0x00400104); + nsource = NV_READ(0x00400108); + DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); + + instance = NV_READ(0x00400158); + notify = NV_READ(0x00400150) >> 16; + DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", nsource, nstatus); + + status &= ~NV_PGRAPH_INTR_NOTIFY; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_NOTIFY); + } + + if (status & NV_PGRAPH_INTR_BUFFER_NOTIFY) { + uint32_t nsource, nstatus, instance, notify; + DRM_DEBUG("NV: PGRAPH buffer notify interrupt\n"); + + nstatus = NV_READ(0x00400104); + nsource 
= NV_READ(0x00400108); + DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); + + instance = NV_READ(0x00400158); + notify = NV_READ(0x00400150) >> 16; + DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", instance, notify); + + status &= ~NV_PGRAPH_INTR_BUFFER_NOTIFY; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_BUFFER_NOTIFY); + } + + if (status & NV_PGRAPH_INTR_MISSING_HW) { + DRM_ERROR("NV: PGRAPH missing hw interrupt\n"); + + status &= ~NV_PGRAPH_INTR_MISSING_HW; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_MISSING_HW); + } + + if (status & NV_PGRAPH_INTR_ERROR) { + uint32_t nsource, nstatus, instance; + uint32_t address; + uint32_t channel; + uint32_t method, subc, data; + + DRM_ERROR("NV: PGRAPH error interrupt\n"); + + nstatus = NV_READ(0x00400104); + nsource = NV_READ(0x00400108); + DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); + + instance = NV_READ(0x00400158); + DRM_DEBUG("instance:0x%08x\n", instance); + + address = NV_READ(0x400704); + data = NV_READ(0x400708); + channel = (address >> 20) & 0x1F; + subc = (address >> 16) & 0x7; + method = address & 0x1FFC; + DRM_DEBUG("NV: 0x400704 = 0x%08x\n", address); + DRM_ERROR("NV: Channel %d/%d (class 0x%04x) -" + "Method 0x%04x, Data 0x%08x\n", + channel, subc, + NV_READ(0x400160+subc*4) & 0xFFFF, + method, data + ); + + status &= ~NV_PGRAPH_INTR_ERROR; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_ERROR); + } + + if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + uint32_t channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + DRM_INFO("NV: PGRAPH context switch interrupt channel %x\n",channel); + switch(dev_priv->card_type) + { + case NV_04: + case NV_05: + nouveau_nv04_context_switch(dev); + break; + case NV_10: + nouveau_nv10_context_switch(dev); + break; + default: + DRM_INFO("NV: Context switch not implemented\n"); + break; + } + + status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_CONTEXT_SWITCH); + } + + if (status) { + DRM_INFO("NV: Unknown PGRAPH interrupt! STAT=0x%08x\n", status); + NV_WRITE(NV_PGRAPH_INTSTAT, status); + } + + NV_WRITE(NV_PMC_INTSTAT, NV_PMC_INTSTAT_PGRAPH_PENDING); +} + +static void nouveau_crtc_irq_handler(drm_device_t *dev, int crtc) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + if (crtc&1) { + NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); + } + + if (crtc&2) { + NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); + } +} + +irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS) +{ + drm_device_t *dev = (drm_device_t*)arg; + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t status; + + status = NV_READ(NV_PMC_INTSTAT); + if (!status) + return IRQ_NONE; + + DRM_DEBUG("PMC INTSTAT: 0x%08x\n", status); + + if (status & NV_PMC_INTSTAT_PFIFO_PENDING) { + nouveau_fifo_irq_handler(dev); + status &= ~NV_PMC_INTSTAT_PFIFO_PENDING; + } + if (status & NV_PMC_INTSTAT_PGRAPH_PENDING) { + nouveau_pgraph_irq_handler(dev); + status &= ~NV_PMC_INTSTAT_PGRAPH_PENDING; + } + if (status & NV_PMC_INTSTAT_CRTCn_PENDING) { + nouveau_crtc_irq_handler(dev, (status>>24)&3); + status &= ~NV_PMC_INTSTAT_CRTCn_PENDING; + } + + if (status) + DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status); + + return IRQ_HANDLED; +} + diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c new file mode 100644 index 00000000..cd53d25d --- /dev/null +++ b/shared-core/nouveau_mem.c @@ -0,0 +1,612 @@ +/* + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 
+ * Copyright 2005 Stephane Marchesin + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Keith Whitwell <keith@tungstengraphics.com> + */ + + +#include "drmP.h" +#include "drm.h" +#include "drm_sarea.h" +#include "nouveau_drv.h" + +static int meminit_ok=0; + +static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size, + DRMFILE filp) +{ + /* Maybe cut off the start of an existing block */ + if (start > p->start) { + struct mem_block *newblock = + drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); + if (!newblock) + goto out; + newblock->start = start; + newblock->size = p->size - (start - p->start); + newblock->filp = NULL; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size -= newblock->size; + p = newblock; + } + + /* Maybe cut off the end of an existing block */ + if (size < p->size) { + struct mem_block *newblock = + drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); + if (!newblock) + goto out; + newblock->start = start + size; + newblock->size = p->size - size; + newblock->filp = NULL; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size = size; + } + +out: + /* Our block is in the middle */ + p->filp = filp; + return p; +} + +static struct mem_block *alloc_block(struct mem_block *heap, uint64_t size, + int align2, DRMFILE filp) +{ + struct mem_block *p; + uint64_t mask = (1 << align2) - 1; + + if (!heap) + return NULL; + + list_for_each(p, heap) { + uint64_t start = (p->start + mask) & ~mask; + if (p->filp == 0 && start + size <= p->start + p->size) + return split_block(p, start, size, filp); + } + + return NULL; +} + +static struct mem_block *find_block(struct mem_block *heap, uint64_t start) +{ + struct mem_block *p; + + list_for_each(p, heap) + if (p->start == start) + return p; + + return NULL; +} + +static void free_block(struct mem_block *p) +{ + p->filp = NULL; + + /* Assumes a single contiguous range. Needs a special filp in + * 'heap' to stop it being subsumed. 
+ */ + if (p->next->filp == 0) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + drm_free(q, sizeof(*q), DRM_MEM_BUFS); + } + + if (p->prev->filp == 0) { + struct mem_block *q = p->prev; + q->size += p->size; + q->next = p->next; + q->next->prev = q; + drm_free(p, sizeof(*q), DRM_MEM_BUFS); + } +} + +/* Initialize. How to check for an uninitialized heap? + */ +static int init_heap(struct mem_block **heap, uint64_t start, uint64_t size) +{ + struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); + + if (!blocks) + return DRM_ERR(ENOMEM); + + *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); + if (!*heap) { + drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); + return DRM_ERR(ENOMEM); + } + + blocks->start = start; + blocks->size = size; + blocks->filp = NULL; + blocks->next = blocks->prev = *heap; + + memset(*heap, 0, sizeof(**heap)); + (*heap)->filp = (DRMFILE) - 1; + (*heap)->next = (*heap)->prev = blocks; + return 0; +} + +/* + * Free all blocks associated with the releasing filp + */ +void nouveau_mem_release(DRMFILE filp, struct mem_block *heap) +{ + struct mem_block *p; + + if (!heap || !heap->next) + return; + + list_for_each(p, heap) { + if (p->filp == filp) + p->filp = NULL; + } + + /* Assumes a single contiguous range. Needs a special filp in + * 'heap' to stop it being subsumed. + */ + list_for_each(p, heap) { + while ((p->filp == 0) && (p->next->filp == 0) && (p->next!=heap)) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + drm_free(q, sizeof(*q), DRM_MEM_DRIVER); + } + } +} + +/* + * Cleanup everything + */ +static void nouveau_mem_takedown(struct mem_block **heap) +{ + struct mem_block *p; + + if (!*heap) + return; + + for (p = (*heap)->next; p != *heap;) { + struct mem_block *q = p; + p = p->next; + drm_free(q, sizeof(*q), DRM_MEM_DRIVER); + } + + drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); + *heap = NULL; +} + +void nouveau_mem_close(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_mem_takedown(&dev_priv->agp_heap); + nouveau_mem_takedown(&dev_priv->fb_heap); +} + +/* returns the amount of FB ram in bytes */ +uint64_t nouveau_mem_fb_amount(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + switch(dev_priv->card_type) + { + case NV_03: + switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) + { + case NV03_BOOT_0_RAM_AMOUNT_8MB: + case NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM: + return 8*1024*1024; + case NV03_BOOT_0_RAM_AMOUNT_4MB: + return 4*1024*1024; + case NV03_BOOT_0_RAM_AMOUNT_2MB: + return 2*1024*1024; + } + break; + case NV_04: + case NV_05: + if (NV_READ(NV03_BOOT_0) & 0x00000100) { + return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024; + } else + switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) + { + case NV04_BOOT_0_RAM_AMOUNT_32MB: + return 32*1024*1024; + case NV04_BOOT_0_RAM_AMOUNT_16MB: + return 16*1024*1024; + case NV04_BOOT_0_RAM_AMOUNT_8MB: + return 8*1024*1024; + case NV04_BOOT_0_RAM_AMOUNT_4MB: + return 4*1024*1024; + } + break; + case NV_10: + case NV_20: + case NV_30: + case NV_40: + case NV_44: + case NV_50: + default: + // XXX won't work on BSD because of pci_read_config_dword + if (dev_priv->flags&NV_NFORCE) { + uint32_t mem; + pci_read_config_dword(dev->pdev, 0x7C, &mem); + return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; + } else if(dev_priv->flags&NV_NFORCE2) { + uint32_t mem; + pci_read_config_dword(dev->pdev, 0x84, &mem); + return (uint64_t)(((mem >> 4) 
& 127) + 1)*1024*1024; + } else { + uint64_t mem; + mem=(NV_READ(NV04_FIFO_DATA)&NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; + return mem*1024*1024; + } + break; + } + + DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n"); + return 0; +} + + + +int nouveau_mem_init(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fb_size; + dev_priv->agp_phys=0; + dev_priv->fb_phys=0; + + /* init AGP */ + dev_priv->agp_heap=NULL; + if (drm_device_is_agp(dev)) + { + int err; + drm_agp_info_t info; + drm_agp_mode_t mode; + drm_agp_buffer_t agp_req; + drm_agp_binding_t bind_req; + + err = drm_agp_acquire(dev); + if (err) { + DRM_ERROR("Unable to acquire AGP: %d\n", err); + goto no_agp; + } + + err = drm_agp_info(dev, &info); + if (err) { + DRM_ERROR("Unable to get AGP info: %d\n", err); + goto no_agp; + } + + /* see agp.h for the AGPSTAT_* modes available */ + mode.mode = info.mode; + err = drm_agp_enable(dev, mode); + if (err) { + DRM_ERROR("Unable to enable AGP: %d\n", err); + goto no_agp; + } + + agp_req.size = info.aperture_size; + agp_req.type = 0; + err = drm_agp_alloc(dev, &agp_req); + if (err) { + DRM_ERROR("Unable to alloc AGP: %d\n", err); + goto no_agp; + } + + bind_req.handle = agp_req.handle; + bind_req.offset = 0; + err = drm_agp_bind(dev, &bind_req); + if (err) { + DRM_ERROR("Unable to bind AGP: %d\n", err); + goto no_agp; + } + + if (init_heap(&dev_priv->agp_heap, info.aperture_base, info.aperture_size)) + goto no_agp; + + dev_priv->agp_phys=info.aperture_base; + } +no_agp: + + /* Init FB */ + dev_priv->fb_phys=drm_get_resource_start(dev,1); + fb_size = nouveau_mem_fb_amount(dev); + /* On at least NV40, RAMIN is actually at the end of vram. + * We don't want to allocate this... */ + if (dev_priv->card_type >= NV_40) + fb_size -= dev_priv->ramin_size; + DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10); + + if (fb_size>256*1024*1024) { + /* On cards with > 256Mb, you can't map everything. + * So we create a second FB heap for that type of memory */ + if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), 256*1024*1024)) + return DRM_ERR(ENOMEM); + if (init_heap(&dev_priv->fb_nomap_heap, drm_get_resource_start(dev,1)+256*1024*1024, fb_size-256*1024*1024)) + return DRM_ERR(ENOMEM); + } else { + if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), fb_size)) + return DRM_ERR(ENOMEM); + dev_priv->fb_nomap_heap=NULL; + } + + return 0; +} + +struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp) +{ + struct mem_block *block; + int type; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + /* + * Init memory if needed + */ + if (meminit_ok==0) + { + nouveau_mem_init(dev); + meminit_ok=1; + } + + /* + * Make things easier on ourselves: all allocations are page-aligned. + * We need that to map allocated regions into the user space + */ + if (alignment < PAGE_SHIFT) + alignment = PAGE_SHIFT; + + /* + * Warn about 0 sized allocations, but let it go through. 
It'll return 1 page + */ + if (size == 0) + DRM_INFO("warning : 0 byte allocation\n"); + + /* + * Keep alloc size a multiple of the page size to keep drm_addmap() happy + */ + if (size & (~PAGE_MASK)) + size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE; + + if (flags&NOUVEAU_MEM_AGP) { + type=NOUVEAU_MEM_AGP; + block = alloc_block(dev_priv->agp_heap, size, alignment, filp); + if (block) goto alloc_ok; + } + if (flags&(NOUVEAU_MEM_FB|NOUVEAU_MEM_FB_ACCEPTABLE)) { + type=NOUVEAU_MEM_FB; + if (!(flags&NOUVEAU_MEM_MAPPED)) { + block = alloc_block(dev_priv->fb_nomap_heap, size, alignment, filp); + if (block) goto alloc_ok; + } + block = alloc_block(dev_priv->fb_heap, size, alignment, filp); + if (block) goto alloc_ok; + } + if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) { + type=NOUVEAU_MEM_AGP; + block = alloc_block(dev_priv->agp_heap, size, alignment, filp); + if (block) goto alloc_ok; + } + + return NULL; + +alloc_ok: + block->flags=type; + + if (flags&NOUVEAU_MEM_MAPPED) + { + int ret; + block->flags|=NOUVEAU_MEM_MAPPED; + + if (type == NOUVEAU_MEM_AGP) + ret = drm_addmap(dev, block->start - dev->agp->base, block->size, + _DRM_AGP, 0, &block->map); + else + ret = drm_addmap(dev, block->start, block->size, + _DRM_FRAME_BUFFER, 0, &block->map); + if (ret) { + free_block(block); + return NULL; + } + } + + DRM_INFO("allocated 0x%llx\n", block->start); + return block; +} + +void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) +{ + DRM_INFO("freeing 0x%llx\n", block->start); + if (meminit_ok==0) + { + DRM_ERROR("%s called without init\n", __FUNCTION__); + return; + } + if (block->flags&NOUVEAU_MEM_MAPPED) + drm_rmmap(dev, block->map); + free_block(block); +} + +int nouveau_instmem_init(struct drm_device *dev, uint32_t offset) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int ret; + + if (dev_priv->card_type >= NV_40) + /* We'll want more instance memory than this on some NV4x cards. + * There's a 16MB aperture to play with that maps onto the end + * of vram. For now, only reserve a small piece until we know + * more about what each chipset requires. + */ + dev_priv->ramin_size = (1*1024* 1024); + else { + /*XXX: what *are* the limits on <NV40 cards?, and does RAMIN + * exist in vram on those cards as well? + */ + dev_priv->ramin_size = (512*1024); + } + DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_size>>10); + + /* Create a heap to manage RAMIN allocations, we don't allocate + * the space that was reserved for RAMHT/FC/RO. 
+ */ + ret = init_heap(&dev_priv->ramin_heap, offset, + dev_priv->ramin_size - offset); + if (ret) { + dev_priv->ramin_heap = NULL; + DRM_ERROR("Failed to init RAMIN heap\n"); + } + + return ret; +} + +struct mem_block *nouveau_instmem_alloc(struct drm_device *dev, + uint32_t size, uint32_t align) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct mem_block *block; + + if (!dev_priv->ramin_heap) { + DRM_ERROR("instmem alloc called without init\n"); + return NULL; + } + + block = alloc_block(dev_priv->ramin_heap, size, align, (DRMFILE)-2); + if (block) { + block->flags = NOUVEAU_MEM_INSTANCE; + DRM_DEBUG("instance(size=%d, align=%d) alloc'd at 0x%08x\n", + size, (1<<align), (uint32_t)block->start); + } + + return block; +} + +void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block) +{ + if (dev && block) { + free_block(block); + } +} + +uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv, + struct mem_block *mem, int index) +{ + uint32_t ofs = (uint32_t)mem->start + (index<<2); + + if (dev_priv->ramin) { +#if defined(__powerpc__) + return in_be32((void __iomem *)(dev_priv->ramin)->handle + ofs); +#else + return DRM_READ32(dev_priv->ramin, ofs); +#endif + } else { + return NV_READ(NV_RAMIN+ofs); + } +} + +void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv, + struct mem_block *mem, int index, uint32_t val) +{ + uint32_t ofs = (uint32_t)mem->start + (index<<2); + + if (dev_priv->ramin) { +#if defined(__powerpc__) + out_be32((void __iomem *)(dev_priv->ramin)->handle + ofs, val); +#else + DRM_WRITE32(dev_priv->ramin, ofs, val); +#endif + } else { + NV_WRITE(NV_RAMIN+ofs, val); + } +} + +/* + * Ioctls + */ + +int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_private_t *dev_priv = dev->dev_private; + drm_nouveau_mem_alloc_t alloc; + struct mem_block *block; + + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + + DRM_COPY_FROM_USER_IOCTL(alloc, (drm_nouveau_mem_alloc_t __user *) data, + sizeof(alloc)); + + block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, filp); + if (!block) + return DRM_ERR(ENOMEM); + alloc.region_offset=block->start; + alloc.flags=block->flags; + + DRM_COPY_TO_USER_IOCTL((drm_nouveau_mem_alloc_t __user *) data, alloc, sizeof(alloc)); + + return 0; +} + +int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_private_t *dev_priv = dev->dev_private; + drm_nouveau_mem_free_t memfree; + struct mem_block *block; + + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + + DRM_COPY_FROM_USER_IOCTL(memfree, (drm_nouveau_mem_free_t __user *) data, + sizeof(memfree)); + + block=NULL; + if (memfree.flags&NOUVEAU_MEM_FB) + block = find_block(dev_priv->fb_heap, memfree.region_offset); + else if (memfree.flags&NOUVEAU_MEM_AGP) + block = find_block(dev_priv->agp_heap, memfree.region_offset); + if (!block) + return DRM_ERR(EFAULT); + if (block->filp != filp) + return DRM_ERR(EPERM); + + nouveau_mem_free(dev, block); + return 0; +} + + diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c new file mode 100644 index 00000000..c11b05eb --- /dev/null +++ b/shared-core/nouveau_object.c @@ -0,0 +1,578 @@ +/* + * Copyright (C) 2006 Ben Skeggs. + * + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Ben Skeggs <darktama@iinet.net.au> + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +/* TODO + * - Check object class, deny unsafe objects (add card-specific versioning?) + * - Get rid of DMA object creation, this should be wrapped by MM routines. + */ + +/* Translate a RAMIN offset into a value the card understands, will be useful + * in the future when we can access more instance ram which isn't mapped into + * the PRAMIN aperture + */ +uint32_t nouveau_chip_instance_get(drm_device_t *dev, + struct mem_block *mem) +{ + uint32_t inst = (uint32_t)mem->start >> 4; + DRM_DEBUG("****** on-chip instance for 0x%016llx = 0x%08x\n", + mem->start, inst); + return inst; +} + +static void nouveau_object_link(drm_device_t *dev, int fifo_num, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num]; + + if (!fifo->objs) { + fifo->objs = obj; + return; + } + + obj->prev = NULL; + obj->next = fifo->objs; + + fifo->objs->prev = obj; + fifo->objs = obj; +} + +static void nouveau_object_unlink(drm_device_t *dev, int fifo_num, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num]; + + if (obj->prev == NULL) { + if (obj->next) + obj->next->prev = NULL; + fifo->objs = obj->next; + } else if (obj->next == NULL) { + if (obj->prev) + obj->prev->next = NULL; + } else { + obj->prev->next = obj->next; + obj->next->prev = obj->prev; + } +} + +static struct nouveau_object * +nouveau_object_handle_find(drm_device_t *dev, int fifo_num, uint32_t handle) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num]; + struct nouveau_object *obj = fifo->objs; + + if (!handle) + return NULL; + + DRM_DEBUG("Looking for handle 0x%08x\n", handle); + while (obj) { + if (obj->handle == handle) + return obj; + obj = obj->next; + } + + DRM_DEBUG("...couldn't find handle\n"); + return NULL; +} + +/* NVidia uses context objects to drive drawing operations. + + Context objects can be selected into 8 subchannels in the FIFO, + and then used via DMA command buffers. + + A context object is referenced by a user defined handle (CARD32). The HW + looks up graphics objects in a hash table in the instance RAM. 
+ + An entry in the hash table consists of 2 CARD32. The first CARD32 contains + the handle, the second one a bitfield, that contains the address of the + object in instance RAM. + + The format of the second CARD32 seems to be: + + NV4 to NV30: + + 15: 0 instance_addr >> 4 + 17:16 engine (here uses 1 = graphics) + 28:24 channel id (here uses 0) + 31 valid (use 1) + + NV40: + + 15: 0 instance_addr >> 4 (maybe 19-0) + 21:20 engine (here uses 1 = graphics) + I'm unsure about the other bits, but using 0 seems to work. + + The key into the hash table depends on the object handle and channel id and + is given as: +*/ +static uint32_t nouveau_handle_hash(drm_device_t* dev, uint32_t handle, + int fifo) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + uint32_t hash = 0; + int i; + + for (i=32;i>0;i-=dev_priv->ramht_bits) { + hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); + handle >>= dev_priv->ramht_bits; + } + hash ^= fifo << (dev_priv->ramht_bits - 4); + return hash << 3; +} + +static int nouveau_hash_table_insert(drm_device_t* dev, int fifo, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + int ht_base = NV_RAMIN + dev_priv->ramht_offset; + int ht_end = ht_base + dev_priv->ramht_size; + int o_ofs, ofs; + + o_ofs = ofs = nouveau_handle_hash(dev, obj->handle, fifo); + + while (NV_READ(ht_base + ofs)) { + ofs += 8; + if (ofs == ht_end) ofs = ht_base; + if (ofs == o_ofs) { + DRM_ERROR("no free hash table entries\n"); + return 1; + } + } + ofs += ht_base; + + DRM_DEBUG("Channel %d - Handle 0x%08x at 0x%08x\n", + fifo, obj->handle, ofs); + + NV_WRITE(NV_RAMHT_HANDLE_OFFSET + ofs, obj->handle); + if (dev_priv->card_type >= NV_40) + NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, + (fifo << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT) | + nouveau_chip_instance_get(dev, obj->instance) + ); + else + NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, + NV_RAMHT_CONTEXT_VALID | + (fifo << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT) | + nouveau_chip_instance_get(dev, obj->instance) + ); + + obj->ht_loc = ofs; + return 0; +} + +static void nouveau_hash_table_remove(drm_device_t* dev, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("Remove handle 0x%08x at 0x%08x from HT\n", + obj->handle, obj->ht_loc); + if (obj->ht_loc) { + DRM_DEBUG("... HT entry was: 0x%08x/0x%08x\n", + NV_READ(obj->ht_loc), NV_READ(obj->ht_loc+4)); + NV_WRITE(obj->ht_loc , 0x00000000); + NV_WRITE(obj->ht_loc+4, 0x00000000); + } +} + +static struct nouveau_object *nouveau_instance_alloc(drm_device_t* dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_object *obj; + + /* Create object struct */ + obj = drm_calloc(1, sizeof(struct nouveau_object), DRM_MEM_DRIVER); + if (!obj) { + DRM_ERROR("couldn't alloc memory for object\n"); + return NULL; + } + obj->instance = nouveau_instmem_alloc(dev, + (dev_priv->card_type >= NV_40 ? 
32 : 16), 4); + if (!obj->instance) { + DRM_ERROR("couldn't alloc RAMIN for object\n"); + drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); + return NULL; + } + + return obj; +} + +static void nouveau_object_instance_free(drm_device_t *dev, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + int count, i; + + if (dev_priv->card_type >= NV_40) + count = 8; + else + count = 4; + + /* Clean RAMIN entry */ + DRM_DEBUG("Instance entry for 0x%08x" + "(engine %d, class 0x%x) before destroy:\n", + obj->handle, obj->engine, obj->class); + for (i=0;i<count;i++) { + DRM_DEBUG(" +0x%02x: 0x%08x\n", (i*4), + INSTANCE_RD(obj->instance, i)); + INSTANCE_WR(obj->instance, i, 0x00000000); + } + + /* Free RAMIN */ + nouveau_instmem_free(dev, obj->instance); +} + +/* + DMA objects are used to reference a piece of memory in the + framebuffer, PCI or AGP address space. Each object is 16 bytes big + and looks as follows: + + entry[0] + 11:0 class (seems like I can always use 0 here) + 12 page table present? + 13 page entry linear? + 15:14 access: 0 rw, 1 ro, 2 wo + 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP + 31:20 dma adjust (bits 0-11 of the address) + entry[1] + dma limit + entry[2] + 1 0 readonly, 1 readwrite + 31:12 dma frame address (bits 12-31 of the address) + + Non linear page tables seem to need a list of frame addresses afterwards, + the rivatv project has some info on this. + + The method below creates a DMA object in instance RAM and returns a handle + to it that can be used to set up context objects. +*/ +struct nouveau_object *nouveau_dma_object_create(drm_device_t* dev, + uint32_t offset, uint32_t size, + int access, uint32_t target) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_object *obj; + uint32_t frame, adjust; + + DRM_DEBUG("offset:0x%08x, size:0x%08x, target:%d, access:%d\n", + offset, size, target, access); + + frame = offset & ~0x00000FFF; + adjust = offset & 0x00000FFF; + + obj = nouveau_instance_alloc(dev); + if (!obj) { + DRM_ERROR("couldn't allocate DMA object\n"); + return obj; + } + + obj->engine = 0; + obj->class = 0; + + INSTANCE_WR(obj->instance, 0, ((1<<12) | (1<<13) | + (adjust << 20) | + (access << 14) | + (target << 16) | + 0x3D /* DMA_IN_MEMORY */)); + INSTANCE_WR(obj->instance, 1, size-1); + INSTANCE_WR(obj->instance, 2, + frame | ((access != NV_DMA_ACCESS_RO) ? (1<<1) : 0)); + /* I don't actually know what this is, the DMA objects I see + * in renouveau dumps usually have this as the same as +8 + */ + INSTANCE_WR(obj->instance, 3, + frame | ((access != NV_DMA_ACCESS_RO) ? (1<<1) : 0)); + + return obj; +} + + +/* Context objects in the instance RAM have the following structure. + * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. + + NV4 - NV30: + + entry[0] + 11:0 class + 12 chroma key enable + 13 user clip enable + 14 swizzle enable + 17:15 patch config: + scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre + 18 synchronize enable + 19 endian: 1 big, 0 little + 21:20 dither mode + 23 single step enable + 24 patch status: 0 invalid, 1 valid + 25 context_surface 0: 1 valid + 26 context surface 1: 1 valid + 27 context pattern: 1 valid + 28 context rop: 1 valid + 29,30 context beta, beta4 + entry[1] + 7:0 mono format + 15:8 color format + 31:16 notify instance address + entry[2] + 15:0 dma 0 instance address + 31:16 dma 1 instance address + entry[3] + dma method traps + + NV40: + No idea what the exact format is. 
Here's what can be deducted: + + entry[0]: + 11:0 class (maybe uses more bits here?) + 17 user clip enable + 21:19 patch config + 25 patch status valid ? + entry[1]: + 15:0 DMA notifier (maybe 20:0) + entry[2]: + 15:0 DMA 0 instance (maybe 20:0) + 24 big endian + entry[3]: + 15:0 DMA 1 instance (maybe 20:0) + entry[4]: + entry[5]: + set to 0? +*/ +static struct nouveau_object *nouveau_context_object_create(drm_device_t* dev, + int class, uint32_t flags, + struct nouveau_object *dma0, + struct nouveau_object *dma1, + struct nouveau_object *dma_notifier) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_object *obj; + uint32_t d0, d1, dn; + uint32_t flags0,flags1,flags2; + flags0=0;flags1=0;flags2=0; + + if (dev_priv->card_type >= NV_40) { + if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND) + flags0 |= 0x02080000; + else if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_SRCCOPY) + flags0 |= 0x02080000; + if (flags & NV_DMA_CONTEXT_FLAGS_CLIP_ENABLE) + flags0 |= 0x00020000; +#ifdef __BIG_ENDIAN + if (flags & NV_DMA_CONTEXT_FLAGS_MONO) + flags1 |= 0x01000000; + flags2 |= 0x01000000; +#else + if (flags & NV_DMA_CONTEXT_FLAGS_MONO) + flags1 |= 0x02000000; +#endif + } else { + if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND) + flags0 |= 0x01008000; + else if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_SRCCOPY) + flags0 |= 0x01018000; + if (flags & NV_DMA_CONTEXT_FLAGS_CLIP_ENABLE) + flags0 |= 0x00002000; +#ifdef __BIG_ENDIAN + flags0 |= 0x00080000; + if (flags & NV_DMA_CONTEXT_FLAGS_MONO) + flags1 |= 0x00000001; +#else + if (flags & NV_DMA_CONTEXT_FLAGS_MONO) + flags1 |= 0x00000002; +#endif + } + + DRM_DEBUG("class=%x, dma0=%08x, dma1=%08x, dman=%08x\n", + class, + dma0 ? dma0->handle : 0, + dma1 ? dma1->handle : 0, + dma_notifier ? dma_notifier->handle : 0); + + obj = nouveau_instance_alloc(dev); + if (!obj) { + DRM_ERROR("couldn't allocate context object\n"); + return obj; + } + + obj->engine = 1; + obj->class = class; + + d0 = dma0 ? nouveau_chip_instance_get(dev, dma0->instance) : 0; + d1 = dma1 ? nouveau_chip_instance_get(dev, dma1->instance) : 0; + dn = dma_notifier ? 
+ nouveau_chip_instance_get(dev, dma_notifier->instance) : 0; + + if (dev_priv->card_type >= NV_40) { + INSTANCE_WR(obj->instance, 0, class | flags0); + INSTANCE_WR(obj->instance, 1, dn | flags1); + INSTANCE_WR(obj->instance, 2, d0 | flags2); + INSTANCE_WR(obj->instance, 3, d1); + INSTANCE_WR(obj->instance, 4, 0x00000000); + INSTANCE_WR(obj->instance, 5, 0x00000000); + INSTANCE_WR(obj->instance, 6, 0x00000000); + INSTANCE_WR(obj->instance, 7, 0x00000000); + } else { + INSTANCE_WR(obj->instance, 0, class | flags0); + INSTANCE_WR(obj->instance, 1, (dn << 16) | flags1); + INSTANCE_WR(obj->instance, 2, d0 | (d1 << 16)); + INSTANCE_WR(obj->instance, 3, 0); + } + + return obj; +} + +static void +nouveau_object_free(drm_device_t *dev, int fifo_num, struct nouveau_object *obj) +{ + nouveau_object_unlink(dev, fifo_num, obj); + + nouveau_object_instance_free(dev, obj); + nouveau_hash_table_remove(dev, obj); + + drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); + return; +} + +void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + int fifo; + + fifo = nouveau_fifo_id_get(dev, filp); + if (fifo == -1) + return; + + while (dev_priv->fifos[fifo].objs) + nouveau_object_free(dev, fifo, dev_priv->fifos[fifo].objs); +} + +int nouveau_ioctl_object_init(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_object_init_t init; + struct nouveau_object *obj, *dma0, *dma1, *dman; + int fifo; + + fifo = nouveau_fifo_id_get(dev, filp); + if (fifo == -1) + return DRM_ERR(EINVAL); + + DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_object_init_t __user *) + data, sizeof(init)); + + //FIXME: check args, only allow trusted objects to be created + + if (nouveau_object_handle_find(dev, fifo, init.handle)) { + DRM_ERROR("Channel %d: handle 0x%08x already exists\n", + fifo, init.handle); + return DRM_ERR(EINVAL); + } + + dma0 = nouveau_object_handle_find(dev, fifo, init.dma0); + if (init.dma0 && !dma0) { + DRM_ERROR("context dma0 - invalid handle 0x%08x\n", init.dma0); + return DRM_ERR(EINVAL); + } + dma1 = nouveau_object_handle_find(dev, fifo, init.dma1); + if (init.dma1 && !dma1) { + DRM_ERROR("context dma1 - invalid handle 0x%08x\n", init.dma0); + return DRM_ERR(EINVAL); + } + dman = nouveau_object_handle_find(dev, fifo, init.dma_notifier); + if (init.dma_notifier && !dman) { + DRM_ERROR("context dman - invalid handle 0x%08x\n", + init.dma_notifier); + return DRM_ERR(EINVAL); + } + + obj = nouveau_context_object_create(dev, init.class, init.flags, + dma0, dma1, dman); + if (!obj) + return DRM_ERR(ENOMEM); + + obj->handle = init.handle; + + if (nouveau_hash_table_insert(dev, fifo, obj)) { + nouveau_object_free(dev, fifo, obj); + return DRM_ERR(ENOMEM); + } + + nouveau_object_link(dev, fifo, obj); + + return 0; +} + +int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_dma_object_init_t init; + struct nouveau_object *obj; + int fifo; + + fifo = nouveau_fifo_id_get(dev, filp); + if (fifo == -1) + return DRM_ERR(EINVAL); + + DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_dma_object_init_t __user *) + data, sizeof(init)); + + if (nouveau_object_handle_find(dev, fifo, init.handle)) { + DRM_ERROR("Channel %d: handle 0x%08x already exists\n", + fifo, init.handle); + return DRM_ERR(EINVAL); + } + + obj = nouveau_dma_object_create(dev, init.offset, init.size, + init.access, init.target); + if (!obj) + return DRM_ERR(ENOMEM); + + obj->handle = init.handle; + if (nouveau_hash_table_insert(dev, fifo, obj)) { + nouveau_object_free(dev, fifo, 
obj); + return DRM_ERR(ENOMEM); + } + + nouveau_object_link(dev, fifo, obj); + + return 0; +} + diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h new file mode 100644 index 00000000..23fce39a --- /dev/null +++ b/shared-core/nouveau_reg.h @@ -0,0 +1,238 @@ + + +#define NV03_BOOT_0 0x00100000 +# define NV03_BOOT_0_RAM_AMOUNT 0x00000003 +# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000 +# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001 +# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002 +# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003 +# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000 +# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001 +# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002 +# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003 + +#define NV04_FIFO_DATA 0x0010020c +# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 +# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 + +#define NV03_PGRAPH_STATUS 0x004006b0 +#define NV04_PGRAPH_STATUS 0x00400700 + +#define NV_RAMIN 0x00700000 + +#define NV_RAMHT_HANDLE_OFFSET 0 +#define NV_RAMHT_CONTEXT_OFFSET 4 +# define NV_RAMHT_CONTEXT_VALID (1<<31) +# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24 +# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16 +# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0 +# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1 +# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0 +# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23 +# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 +# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 + +#define NV_DMA_ACCESS_RW 0 +#define NV_DMA_ACCESS_RO 1 +#define NV_DMA_ACCESS_WO 2 +#define NV_DMA_TARGET_VIDMEM 0 +#define NV_DMA_TARGET_AGP 3 + +#define NV03_FIFO_SIZE 0x8000UL +#define NV_MAX_FIFO_NUMBER 32 +#define NV03_FIFO_REGS_SIZE 0x10000 +#define NV03_FIFO_REGS(i) (0x00800000+i*NV03_FIFO_REGS_SIZE) +# define NV03_FIFO_REGS_DMAPUT(i) (NV03_FIFO_REGS(i)+0x40) +# define NV03_FIFO_REGS_DMAGET(i) (NV03_FIFO_REGS(i)+0x44) + +#define NV_PMC_BOOT_0 0x00000000 +#define NV_PMC_INTSTAT 0x00000100 +# define NV_PMC_INTSTAT_PFIFO_PENDING (1<< 8) +# define NV_PMC_INTSTAT_PGRAPH_PENDING (1<<12) +# define NV_PMC_INTSTAT_CRTC0_PENDING (1<<24) +# define NV_PMC_INTSTAT_CRTC1_PENDING (1<<25) +# define NV_PMC_INTSTAT_CRTCn_PENDING (3<<24) +#define NV_PMC_INTEN 0x00000140 +# define NV_PMC_INTEN_MASTER_ENABLE (1<< 0) + +#define NV_PGRAPH_INTSTAT 0x00400100 +#define NV04_PGRAPH_INTEN 0x00400140 +#define NV40_PGRAPH_INTEN 0x0040013C +# define NV_PGRAPH_INTR_NOTIFY (1<< 0) +# define NV_PGRAPH_INTR_MISSING_HW (1<< 4) +# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12) +# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16) +# define NV_PGRAPH_INTR_ERROR (1<<20) +#define NV_PGRAPH_CTX_CONTROL 0x00400144 +#define NV_PGRAPH_NV40_UNK220 0x00400220 +# define NV_PGRAPH_NV40_UNK220_FB_INSTANCE +#define NV_PGRAPH_CTX_USER 0x00400148 +#define NV_PGRAPH_CTX_SWITCH1 0x0040014C +#define NV_PGRAPH_FIFO 0x00400720 +#define NV_PGRAPH_FFINTFC_ST2 0x00400764 + +/* It's a guess that this works on NV03. 
Confirmed on NV04, though */ +#define NV_PFIFO_DELAY_0 0x00002040 +#define NV_PFIFO_DMA_TIMESLICE 0x00002044 +#define NV_PFIFO_INTSTAT 0x00002100 +#define NV_PFIFO_INTEN 0x00002140 +# define NV_PFIFO_INTR_CACHE_ERROR (1<< 0) +# define NV_PFIFO_INTR_RUNOUT (1<< 4) +# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<< 8) +# define NV_PFIFO_INTR_DMA_PUSHER (1<<12) +# define NV_PFIFO_INTR_DMA_PT (1<<16) +# define NV_PFIFO_INTR_SEMAPHORE (1<<20) +# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24) +#define NV_PFIFO_RAMHT 0x00002210 +#define NV_PFIFO_RAMFC 0x00002214 +#define NV_PFIFO_RAMRO 0x00002218 +#define NV40_PFIFO_RAMFC 0x00002220 +#define NV_PFIFO_CACHES 0x00002500 +#define NV_PFIFO_MODE 0x00002504 +#define NV_PFIFO_DMA 0x00002508 +#define NV_PFIFO_SIZE 0x0000250c +#define NV_PFIFO_CACH0_PSH0 0x00003000 +#define NV_PFIFO_CACH0_PUL0 0x00003050 +#define NV_PFIFO_CACH0_PUL1 0x00003054 +#define NV_PFIFO_CACH1_PSH0 0x00003200 +#define NV_PFIFO_CACH1_PSH1 0x00003204 +#define NV_PFIFO_CACH1_DMAPSH 0x00003220 +#define NV_PFIFO_CACH1_DMAF 0x00003224 +# define NV_PFIFO_CACH1_DMAF_TRIG_8_BYTES 0x00000000 +# define NV_PFIFO_CACH1_DMAF_TRIG_16_BYTES 0x00000008 +# define NV_PFIFO_CACH1_DMAF_TRIG_24_BYTES 0x00000010 +# define NV_PFIFO_CACH1_DMAF_TRIG_32_BYTES 0x00000018 +# define NV_PFIFO_CACH1_DMAF_TRIG_40_BYTES 0x00000020 +# define NV_PFIFO_CACH1_DMAF_TRIG_48_BYTES 0x00000028 +# define NV_PFIFO_CACH1_DMAF_TRIG_56_BYTES 0x00000030 +# define NV_PFIFO_CACH1_DMAF_TRIG_64_BYTES 0x00000038 +# define NV_PFIFO_CACH1_DMAF_TRIG_72_BYTES 0x00000040 +# define NV_PFIFO_CACH1_DMAF_TRIG_80_BYTES 0x00000048 +# define NV_PFIFO_CACH1_DMAF_TRIG_88_BYTES 0x00000050 +# define NV_PFIFO_CACH1_DMAF_TRIG_96_BYTES 0x00000058 +# define NV_PFIFO_CACH1_DMAF_TRIG_104_BYTES 0x00000060 +# define NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES 0x00000068 +# define NV_PFIFO_CACH1_DMAF_TRIG_120_BYTES 0x00000070 +# define NV_PFIFO_CACH1_DMAF_TRIG_128_BYTES 0x00000078 +# define NV_PFIFO_CACH1_DMAF_TRIG_136_BYTES 0x00000080 +# define NV_PFIFO_CACH1_DMAF_TRIG_144_BYTES 0x00000088 +# define NV_PFIFO_CACH1_DMAF_TRIG_152_BYTES 0x00000090 +# define NV_PFIFO_CACH1_DMAF_TRIG_160_BYTES 0x00000098 +# define NV_PFIFO_CACH1_DMAF_TRIG_168_BYTES 0x000000A0 +# define NV_PFIFO_CACH1_DMAF_TRIG_176_BYTES 0x000000A8 +# define NV_PFIFO_CACH1_DMAF_TRIG_184_BYTES 0x000000B0 +# define NV_PFIFO_CACH1_DMAF_TRIG_192_BYTES 0x000000B8 +# define NV_PFIFO_CACH1_DMAF_TRIG_200_BYTES 0x000000C0 +# define NV_PFIFO_CACH1_DMAF_TRIG_208_BYTES 0x000000C8 +# define NV_PFIFO_CACH1_DMAF_TRIG_216_BYTES 0x000000D0 +# define NV_PFIFO_CACH1_DMAF_TRIG_224_BYTES 0x000000D8 +# define NV_PFIFO_CACH1_DMAF_TRIG_232_BYTES 0x000000E0 +# define NV_PFIFO_CACH1_DMAF_TRIG_240_BYTES 0x000000E8 +# define NV_PFIFO_CACH1_DMAF_TRIG_248_BYTES 0x000000F0 +# define NV_PFIFO_CACH1_DMAF_TRIG_256_BYTES 0x000000F8 +# define NV_PFIFO_CACH1_DMAF_SIZE 0x0000E000 +# define NV_PFIFO_CACH1_DMAF_SIZE_32_BYTES 0x00000000 +# define NV_PFIFO_CACH1_DMAF_SIZE_64_BYTES 0x00002000 +# define NV_PFIFO_CACH1_DMAF_SIZE_96_BYTES 0x00004000 +# define NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES 0x00006000 +# define NV_PFIFO_CACH1_DMAF_SIZE_160_BYTES 0x00008000 +# define NV_PFIFO_CACH1_DMAF_SIZE_192_BYTES 0x0000A000 +# define NV_PFIFO_CACH1_DMAF_SIZE_224_BYTES 0x0000C000 +# define NV_PFIFO_CACH1_DMAF_SIZE_256_BYTES 0x0000E000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS 0x001F0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_0 0x00000000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_1 0x00010000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_2 0x00020000 +# define 
NV_PFIFO_CACH1_DMAF_MAX_REQS_3 0x00030000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_4 0x00040000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_5 0x00050000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_6 0x00060000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_7 0x00070000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_8 0x00080000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_9 0x00090000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_10 0x000A0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_11 0x000B0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_12 0x000C0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_13 0x000D0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_14 0x000E0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_15 0x000F0000 +# define NV_PFIFO_CACH1_ENDIAN 0x80000000 +# define NV_PFIFO_CACH1_LITTLE_ENDIAN 0x7FFFFFFF +# define NV_PFIFO_CACH1_BIG_ENDIAN 0x80000000 +#define NV_PFIFO_CACH1_DMAS 0x00003228 +#define NV_PFIFO_CACH1_DMAI 0x0000322c +#define NV_PFIFO_CACH1_DMAC 0x00003230 +#define NV_PFIFO_CACH1_DMAP 0x00003240 +#define NV_PFIFO_CACH1_DMAG 0x00003244 +#define NV_PFIFO_CACH1_REF_CNT 0x00003248 +#define NV_PFIFO_CACH1_DMASR 0x0000324C +#define NV_PFIFO_CACH1_PUL0 0x00003250 +#define NV_PFIFO_CACH1_PUL1 0x00003254 +#define NV_PFIFO_CACH1_HASH 0x00003258 +#define NV_PFIFO_CACH1_ACQUIRE_TIMEOUT 0x00003260 +#define NV_PFIFO_CACH1_ACQUIRE_TIMESTAMP 0x00003264 +#define NV_PFIFO_CACH1_ACQUIRE_VALUE 0x00003268 +#define NV_PFIFO_CACH1_SEMAPHORE 0x0000326C +#define NV_PFIFO_CACH1_GET 0x00003270 +#define NV_PFIFO_CACH1_ENG 0x00003280 +#define NV_PFIFO_CACH1_DMA_DCOUNT 0x000032A0 +#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 +#define NV40_PFIFO_UNK32E4 0x000032E4 +#define NV_PFIFO_CACH1_METHOD(i) (0x00003800+(i*8)) +#define NV_PFIFO_CACH1_DATA(i) (0x00003804+(i*8)) +#define NV40_PFIFO_CACH1_METHOD(i) (0x00090000+(i*8)) +#define NV40_PFIFO_CACH1_DATA(i) (0x00090004+(i*8)) + +#define NV_CRTC0_INTSTAT 0x00600100 +#define NV_CRTC0_INTEN 0x00600140 +#define NV_CRTC1_INTSTAT 0x00602100 +#define NV_CRTC1_INTEN 0x00602140 +# define NV_CRTC_INTR_VBLANK (1<<0) + +/* Fifo commands. These are not regs, neither masks */ +#define NV03_FIFO_CMD_JUMP 0x20000000 +#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc +#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK)) + +/* RAMFC offsets */ +#define NV04_RAMFC_DMA_PUT 0x00 +#define NV04_RAMFC_DMA_GET 0x04 +#define NV04_RAMFC_DMA_INSTANCE 0x08 +#define NV04_RAMFC_DMA_FETCH 0x16 + +#define NV10_RAMFC_DMA_PUT 0x00 +#define NV10_RAMFC_DMA_GET 0x04 +#define NV10_RAMFC_REF_CNT 0x08 +#define NV10_RAMFC_DMA_INSTANCE 0x0C +#define NV10_RAMFC_DMA_STATE 0x10 +#define NV10_RAMFC_DMA_FETCH 0x14 +#define NV10_RAMFC_ENGINE 0x18 +#define NV10_RAMFC_PULL1_ENGINE 0x1C +#define NV10_RAMFC_ACQUIRE_VALUE 0x20 +#define NV10_RAMFC_ACQUIRE_TIMESTAMP 0x24 +#define NV10_RAMFC_ACQUIRE_TIMEOUT 0x28 +#define NV10_RAMFC_SEMAPHORE 0x2C +#define NV10_RAMFC_DMA_SUBROUTINE 0x30 + +#define NV40_RAMFC_DMA_PUT 0x00 +#define NV40_RAMFC_DMA_GET 0x04 +#define NV40_RAMFC_REF_CNT 0x08 +#define NV40_RAMFC_DMA_INSTANCE 0x0C +#define NV40_RAMFC_DMA_DCOUNT /* ? 
*/ 0x10 +#define NV40_RAMFC_DMA_STATE 0x14 +#define NV40_RAMFC_DMA_FETCH 0x18 +#define NV40_RAMFC_ENGINE 0x1C +#define NV40_RAMFC_PULL1_ENGINE 0x20 +#define NV40_RAMFC_ACQUIRE_VALUE 0x24 +#define NV40_RAMFC_ACQUIRE_TIMESTAMP 0x28 +#define NV40_RAMFC_ACQUIRE_TIMEOUT 0x2C +#define NV40_RAMFC_SEMAPHORE 0x30 +#define NV40_RAMFC_DMA_SUBROUTINE 0x34 +#define NV40_RAMFC_GRCTX_INSTANCE /* guess */ 0x38 +#define NV40_RAMFC_DMA_TIMESLICE 0x3C +#define NV40_RAMFC_UNK_40 0x40 +#define NV40_RAMFC_UNK_44 0x44 +#define NV40_RAMFC_UNK_48 0x48 +#define NV40_RAMFC_2088 0x4C +#define NV40_RAMFC_3300 0x50 + diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c new file mode 100644 index 00000000..44f8c1aa --- /dev/null +++ b/shared-core/nouveau_state.c @@ -0,0 +1,222 @@ +/* + * Copyright 2005 Stephane Marchesin + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm.h" +#include "drm_sarea.h" +#include "nouveau_drv.h" + +/* here a client dies, release the stuff that was allocated for its filp */ +void nouveau_preclose(drm_device_t * dev, DRMFILE filp) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + nouveau_mem_release(filp,dev_priv->fb_heap); + nouveau_mem_release(filp,dev_priv->agp_heap); + nouveau_object_cleanup(dev, filp); + nouveau_fifo_cleanup(dev, filp); +} + +/* first module load, setup the mmio/fb mapping */ +int nouveau_firstopen(struct drm_device *dev) +{ + int ret; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + /* resource 0 is mmio regs */ + /* resource 1 is linear FB */ + /* resource 2 is RAMIN (mmio regs + 0x1000000) */ + /* resource 6 is bios */ + + /* map the mmio regs */ + ret = drm_addmap(dev, drm_get_resource_start(dev, 0), drm_get_resource_len(dev, 0), + _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); + if (dev_priv->mmio) + { + DRM_INFO("regs mapped ok at 0x%lx\n",dev_priv->mmio->offset); + } + else + { + DRM_ERROR("Unable to initialize the mmio mapping. 
Please report your setup to " DRIVER_EMAIL "\n"); + return 1; + } + + DRM_INFO("%lld MB of video ram detected\n",nouveau_mem_fb_amount(dev)>>20); + + /* map larger RAMIN aperture on NV40 cards */ + if (dev_priv->card_type >= NV_40) { + ret = drm_addmap(dev, drm_get_resource_start(dev, 2), + drm_get_resource_len(dev, 2), + _DRM_REGISTERS, + _DRM_READ_ONLY, + &dev_priv->ramin); + if (ret) { + DRM_ERROR("Failed to init RAMIN mapping, " + "limited instance memory available\n"); + dev_priv->ramin = NULL; + } + } else + dev_priv->ramin = NULL; + + /* Clear RAMIN + * Determine locations for RAMHT/FC/RO + * Initialise PFIFO + */ + ret = nouveau_fifo_init(dev); + if (ret) return ret; + + /* setup a mtrr over the FB */ + dev_priv->fb_mtrr=drm_mtrr_add(drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); + + /* FIXME: doesn't belong here, and have no idea what it's for.. */ + if (dev_priv->card_type >= NV_40) + nv40_graph_init(dev); + + return 0; +} + +int nouveau_load(struct drm_device *dev, unsigned long flags) +{ + drm_nouveau_private_t *dev_priv; + + if (flags==NV_UNKNOWN) + return DRM_ERR(EINVAL); + + dev_priv = drm_alloc(sizeof(drm_nouveau_private_t), DRM_MEM_DRIVER); + if (!dev_priv) + return DRM_ERR(ENOMEM); + + memset(dev_priv, 0, sizeof(drm_nouveau_private_t)); + dev_priv->card_type=flags&NOUVEAU_FAMILY; + dev_priv->flags=flags&NOUVEAU_FLAGS; + + dev->dev_private = (void *)dev_priv; + + return 0; +} + +void nouveau_lastclose(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + if(dev_priv->fb_mtrr>0) + { + drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); + dev_priv->fb_mtrr=0; + } +} + +int nouveau_unload(struct drm_device *dev) +{ + drm_free(dev->dev_private, sizeof(*dev->dev_private), DRM_MEM_DRIVER); + dev->dev_private = NULL; + return 0; +} + +int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_private_t *dev_priv = dev->dev_private; + drm_nouveau_getparam_t getparam; + + DRM_COPY_FROM_USER_IOCTL(getparam, (drm_nouveau_getparam_t __user *)data, + sizeof(getparam)); + + switch (getparam.param) { + case NOUVEAU_GETPARAM_PCI_VENDOR: + getparam.value=dev->pci_vendor; + break; + case NOUVEAU_GETPARAM_PCI_DEVICE: + getparam.value=dev->pci_device; + break; + case NOUVEAU_GETPARAM_BUS_TYPE: + if (drm_device_is_agp(dev)) + getparam.value=NV_AGP; + else if (drm_device_is_pcie(dev)) + getparam.value=NV_PCIE; + else + getparam.value=NV_PCI; + break; + case NOUVEAU_GETPARAM_FB_PHYSICAL: + getparam.value=dev_priv->fb_phys; + break; + case NOUVEAU_GETPARAM_AGP_PHYSICAL: + getparam.value=dev_priv->agp_phys; + break; + default: + DRM_ERROR("unknown parameter %d\n", getparam.param); + return DRM_ERR(EINVAL); + } + + DRM_COPY_TO_USER_IOCTL((drm_nouveau_getparam_t __user *)data, getparam, + sizeof(getparam)); + return 0; +} + +int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_private_t *dev_priv = dev->dev_private; + drm_nouveau_setparam_t setparam; + + DRM_COPY_FROM_USER_IOCTL(setparam, (drm_nouveau_setparam_t __user *)data, + sizeof(setparam)); + + switch (setparam.param) { + case NOUVEAU_SETPARAM_CMDBUF_LOCATION: + switch (setparam.value) { + case NOUVEAU_MEM_AGP: + case NOUVEAU_MEM_FB: + break; + default: + DRM_ERROR("invalid CMDBUF_LOCATION value=%d\n", setparam.value); + return DRM_ERR(EINVAL); + } + dev_priv->config.cmdbuf.location = setparam.value; + break; + case NOUVEAU_SETPARAM_CMDBUF_SIZE: + dev_priv->config.cmdbuf.size = setparam.value; + 
break; + default: + DRM_ERROR("unknown parameter %d\n", setparam.param); + return DRM_ERR(EINVAL); + } + + return 0; +} + +/* waits for idle */ +void nouveau_wait_for_idle(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + switch(dev_priv->card_type) + { + case NV_03: + while(NV_READ(NV03_PGRAPH_STATUS)); + break; + default: + while(NV_READ(NV04_PGRAPH_STATUS)); + break; + } +} + diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c new file mode 100644 index 00000000..53f55bce --- /dev/null +++ b/shared-core/nv40_graph.c @@ -0,0 +1,827 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +/* The sizes are taken from the difference between the start of two + * grctx addresses while running the nvidia driver. Probably slightly + * larger than they actually are, because of other objects being created + * between the contexts + */ +#define NV40_GRCTX_SIZE (175*1024) +#define NV43_GRCTX_SIZE (70*1024) +#define NV4A_GRCTX_SIZE (60*1024) +#define NV4E_GRCTX_SIZE (25*1024) + +/*TODO: deciper what each offset in the context represents. The below + * contexts are taken from dumps just after the 3D object is + * created. + */ +static void nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + /* Always has the "instance address" of itself at offset 0 */ + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + /* unknown */ + INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00128/4, 0x02008821); + INSTANCE_WR(ctx, 0x0016c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00170/4, 0x00000040); + INSTANCE_WR(ctx, 0x00174/4, 0x00000040); + INSTANCE_WR(ctx, 0x0017c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00180/4, 0x80000000); + INSTANCE_WR(ctx, 0x00184/4, 0x80000000); + INSTANCE_WR(ctx, 0x00188/4, 0x80000000); + INSTANCE_WR(ctx, 0x0018c/4, 0x80000000); + INSTANCE_WR(ctx, 0x0019c/4, 0x00000040); + INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001c0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00340/4, 0x00040000); + INSTANCE_WR(ctx, 0x00350/4, 0x55555555); + INSTANCE_WR(ctx, 0x00354/4, 0x55555555); + INSTANCE_WR(ctx, 0x00358/4, 0x55555555); + INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); + INSTANCE_WR(ctx, 0x00388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0039c/4, 0x00000010); + INSTANCE_WR(ctx, 0x00480/4, 0x00000100); + INSTANCE_WR(ctx, 0x00494/4, 0x00000111); + INSTANCE_WR(ctx, 0x00498/4, 0x00080060); + INSTANCE_WR(ctx, 0x004b4/4, 0x00000080); + INSTANCE_WR(ctx, 0x004b8/4, 0xffff0000); + INSTANCE_WR(ctx, 0x004bc/4, 0x00000001); + INSTANCE_WR(ctx, 0x004d0/4, 0x46400000); + INSTANCE_WR(ctx, 0x004ec/4, 0xffff0000); + INSTANCE_WR(ctx, 0x004f8/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x004fc/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00504/4, 0x00011100); + for (i=0x00520; i<=0x0055c; i+=4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00568/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x00594/4, 0x30201000); + INSTANCE_WR(ctx, 0x00598/4, 0x70605040); + INSTANCE_WR(ctx, 0x0059c/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x005a0/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x005b4/4, 0x40100000); + INSTANCE_WR(ctx, 0x005cc/4, 0x00000004); + INSTANCE_WR(ctx, 0x005d8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0060c/4, 
0x435185d6); + INSTANCE_WR(ctx, 0x00610/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00614/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00618/4, 0x00000098); + INSTANCE_WR(ctx, 0x00628/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0062c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00630/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00640/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x0067c/4, 0x00ffff00); + /* 0x680-0x6BC - NV30_TCL_PRIMITIVE_3D_TX_ADDRESS_UNIT(0-15) */ + /* 0x6C0-0x6FC - NV30_TCL_PRIMITIVE_3D_TX_FORMAT_UNIT(0-15) */ + for (i=0x006C0; i<=0x006fc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + /* 0x700-0x73C - NV30_TCL_PRIMITIVE_3D_TX_WRAP_UNIT(0-15) */ + for (i=0x00700; i<=0x0073c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + /* 0x740-0x77C - NV30_TCL_PRIMITIVE_3D_TX_ENABLE_UNIT(0-15) */ + /* 0x780-0x7BC - NV30_TCL_PRIMITIVE_3D_TX_SWIZZLE_UNIT(0-15) */ + for (i=0x00780; i<=0x007bc; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + /* 0x7C0-0x7FC - NV30_TCL_PRIMITIVE_3D_TX_FILTER_UNIT(0-15) */ + for (i=0x007c0; i<=0x007fc; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + /* 0x800-0x83C - NV30_TCL_PRIMITIVE_3D_TX_XY_DIM_UNIT(0-15) */ + for (i=0x00800; i<=0x0083c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + /* 0x840-0x87C - NV30_TCL_PRIMITIVE_3D_TX_UNK07_UNIT(0-15) */ + /* 0x880-0x8BC - NV30_TCL_PRIMITIVE_3D_TX_DEPTH_UNIT(0-15) */ + for (i=0x00880; i<=0x008bc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + /* unknown */ + for (i=0x00910; i<=0x0091c; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x00920; i<=0x0092c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x00940; i<=0x0094c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x00960; i<=0x0096c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00980/4, 0x00000002); + INSTANCE_WR(ctx, 0x009b4/4, 0x00000001); + INSTANCE_WR(ctx, 0x009c0/4, 0x3e020200); + INSTANCE_WR(ctx, 0x009c4/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x009c8/4, 0x60103f00); + INSTANCE_WR(ctx, 0x009d4/4, 0x00020000); + INSTANCE_WR(ctx, 0x00a08/4, 0x00008100); + INSTANCE_WR(ctx, 0x00aac/4, 0x00000001); + INSTANCE_WR(ctx, 0x00af0/4, 0x00000001); + INSTANCE_WR(ctx, 0x00af8/4, 0x80800001); + INSTANCE_WR(ctx, 0x00bcc/4, 0x00000005); + INSTANCE_WR(ctx, 0x00bf8/4, 0x00005555); + INSTANCE_WR(ctx, 0x00bfc/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c00/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c04/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c08/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c0c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c44/4, 0x00000001); + for (i=0x03008; i<=0x03080; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x05288; i<=0x08570; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x08628; i<=0x08e18; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x0bd28; i<=0x0f010; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0f0c8; i<=0x0f8b8; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x127c8; i<=0x15ab0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x15b68; i<=0x16358; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x19268; i<=0x1c550; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x1c608; i<=0x1cdf8; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x1fd08; i<=0x22ff0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x230a8; i<=0x23898; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x267a8; i<=0x29a90; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x29b48; i<=0x2a338; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + +static void +nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = 
dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00128/4, 0x02008821); + INSTANCE_WR(ctx, 0x00178/4, 0x00000040); + INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00180/4, 0x00000040); + INSTANCE_WR(ctx, 0x00188/4, 0x00000040); + INSTANCE_WR(ctx, 0x00194/4, 0x80000000); + INSTANCE_WR(ctx, 0x00198/4, 0x80000000); + INSTANCE_WR(ctx, 0x0019c/4, 0x80000000); + INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001a4/4, 0x80000000); + INSTANCE_WR(ctx, 0x001a8/4, 0x80000000); + INSTANCE_WR(ctx, 0x001ac/4, 0x80000000); + INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00340/4, 0x00040000); + INSTANCE_WR(ctx, 0x00350/4, 0x55555555); + INSTANCE_WR(ctx, 0x00354/4, 0x55555555); + INSTANCE_WR(ctx, 0x00358/4, 0x55555555); + INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); + INSTANCE_WR(ctx, 0x00388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); + INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); + INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); + INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); + INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); + INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); + INSTANCE_WR(ctx, 0x00408/4, 0x46400000); + INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00430/4, 0x00011100); + for (i=0x0044c; i<=0x00488; i+=4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); + INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); + INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); + INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00538/4, 0x00000098); + INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00560/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x00598/4, 0x00ffff00); + for (i=0x005dc; i<=0x00618; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x0061c; i<=0x00658; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x0069c; i<=0x006d8; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x006dc; i<=0x00718; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x0071c; i<=0x00758; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x0079c; i<=0x007d8; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x0082c; i<=0x00838; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x0083c; i<=0x00848; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x0085c; i<=0x00868; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x0087c; i<=0x00888; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x0089c/4, 0x00000002); + INSTANCE_WR(ctx, 0x008d0/4, 0x00000021); + INSTANCE_WR(ctx, 0x008d4/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); + INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); + INSTANCE_WR(ctx, 0x008f4/4, 0x00020000); + INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); + INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); + INSTANCE_WR(ctx, 0x009fc/4, 
0x00001001); + INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); + INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); + INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); + INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); + INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); + INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); + INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); + INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); + for (i=0x02ec0; i<=0x02f38; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x04c80; i<=0x06e70; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x06e80; i<=0x07270; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x096c0; i<=0x0b8b0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0b8c0; i<=0x0bcb0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x0e100; i<=0x102f0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x10300; i<=0x106f0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +}; + +static void nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00128/4, 0x02008821); + INSTANCE_WR(ctx, 0x00158/4, 0x00000001); + INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00160/4, 0x00000001); + INSTANCE_WR(ctx, 0x00164/4, 0x00000001); + INSTANCE_WR(ctx, 0x00168/4, 0x00000001); + INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00170/4, 0x00000001); + INSTANCE_WR(ctx, 0x00174/4, 0x00000001); + INSTANCE_WR(ctx, 0x00178/4, 0x00000040); + INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00180/4, 0x00000040); + INSTANCE_WR(ctx, 0x00188/4, 0x00000040); + INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00340/4, 0x00040000); + INSTANCE_WR(ctx, 0x00350/4, 0x55555555); + INSTANCE_WR(ctx, 0x00354/4, 0x55555555); + INSTANCE_WR(ctx, 0x00358/4, 0x55555555); + INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); + INSTANCE_WR(ctx, 0x00388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0039c/4, 0x00003010); + INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); + INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); + INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); + INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); + INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); + INSTANCE_WR(ctx, 0x00408/4, 0x46400000); + INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00430/4, 0x00011100); + for (i=0x0044c; i<=0x00488; i+=4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); + INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); + INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); + INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00538/4, 0x00000098); + INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); + for (i=0x005d8; i<=0x00614; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x00618; i<=0x00654; 
i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x00698; i<=0x006d4; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x006d8; i<=0x00714; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x00718; i<=0x00754; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x00798; i<=0x007d4; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x00828; i<=0x00834; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x00838; i<=0x00844; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x00858; i<=0x00864; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x00878; i<=0x00884; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00898/4, 0x00000002); + INSTANCE_WR(ctx, 0x008cc/4, 0x00000021); + INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); + INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); + INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); + INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); + INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); + INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); + INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); + INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); + INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); + INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); + INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); + INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); + INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); + INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); + INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); + for (i=0x016c0; i<=0x01738; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x03840; i<=0x05670; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x05680; i<=0x05a70; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x07e00; i<=0x09ff0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0a000; i<=0x0a3f0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x0c780; i<=0x0e970; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0e980; i<=0x0ed70; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + +static void nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00128/4, 0x02008821); + INSTANCE_WR(ctx, 0x00158/4, 0x00000001); + INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00160/4, 0x00000001); + INSTANCE_WR(ctx, 0x00164/4, 0x00000001); + INSTANCE_WR(ctx, 0x00168/4, 0x00000001); + INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00170/4, 0x00000001); + INSTANCE_WR(ctx, 0x00174/4, 0x00000001); + INSTANCE_WR(ctx, 0x00178/4, 0x00000040); + INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00180/4, 0x00000040); + INSTANCE_WR(ctx, 0x00188/4, 0x00000040); + INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00340/4, 0x00040000); + INSTANCE_WR(ctx, 0x00350/4, 0x55555555); + INSTANCE_WR(ctx, 0x00354/4, 0x55555555); + INSTANCE_WR(ctx, 0x00358/4, 0x55555555); + INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); + INSTANCE_WR(ctx, 0x00388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); + INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); + INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); + INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); + INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); + INSTANCE_WR(ctx, 0x003f4/4, 
0x00000001); + INSTANCE_WR(ctx, 0x00408/4, 0x46400000); + INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00430/4, 0x00011100); + for (i=0x0044c; i<=0x00488; i+=4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); + INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); + INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); + INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00538/4, 0x00000098); + INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); + for (i=0x005d8; i<=0x00614; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x00618; i<=0x00654; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x00698; i<=0x006d4; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x006d8; i<=0x00714; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x00718; i<=0x00754; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x00798; i<=0x007d4; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x00828; i<=0x00834; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x00838; i<=0x00844; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x00858; i<=0x00864; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x00878; i<=0x00884; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00898/4, 0x00000002); + INSTANCE_WR(ctx, 0x008cc/4, 0x00000020); + INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); + INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); + INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); + INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); + INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); + INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); + INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); + INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); + INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); + INSTANCE_WR(ctx, 0x00a6c/4, 0x00000005); + INSTANCE_WR(ctx, 0x00a78/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00a94/4, 0x00005555); + INSTANCE_WR(ctx, 0x00a98/4, 0x00000001); + INSTANCE_WR(ctx, 0x00aa4/4, 0x00000001); + for (i=0x01668; i<=0x016e0; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x03428; i<=0x05618; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x05628; i<=0x05a18; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + +int +nv40_graph_context_create(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + void (*ctx_init)(drm_device_t *, struct mem_block *); + unsigned int ctx_size; + int i, chipset; + + chipset = (NV_READ(NV_PMC_BOOT_0) & 0x0ff00000) >> 20; + switch (chipset) { + case 0x40: + ctx_size = NV40_GRCTX_SIZE; + ctx_init = nv40_graph_context_init; + break; + case 0x43: + ctx_size = NV43_GRCTX_SIZE; + ctx_init = nv43_graph_context_init; + break; + case 0x4a: + ctx_size = NV4A_GRCTX_SIZE; + ctx_init = nv4a_graph_context_init; + break; + case 0x4e: + ctx_size = NV4E_GRCTX_SIZE; + ctx_init = nv4e_graph_context_init; + break; + default: + ctx_size = NV40_GRCTX_SIZE; + ctx_init = nv40_graph_context_init; + break; + } 
+ + /* Alloc and clear RAMIN to store the context */ + chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4); + if (!chan->ramin_grctx) + return DRM_ERR(ENOMEM); + for (i=0; i<ctx_size; i+=4) + INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000); + + /* Initialise default context values */ + ctx_init(dev, chan->ramin_grctx); + + return 0; +} + +/* Save current context (from PGRAPH) into the channel's context + * XXX: fails sometimes, not sure why.. + */ +void +nv40_graph_context_save_current(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + uint32_t instance; + int i; + + NV_WRITE(NV_PGRAPH_FIFO, 0); + + instance = NV_READ(0x40032C) & 0xFFFFF; + if (!instance) { + NV_WRITE(NV_PGRAPH_FIFO, 1); + return; + } + + NV_WRITE(0x400784, instance); + NV_WRITE(0x400310, NV_READ(0x400310) | 0x20); + NV_WRITE(0x400304, 1); + /* just in case, we don't want to spin in-kernel forever */ + for (i=0; i<1000; i++) { + if (NV_READ(0x40030C) == 0) + break; + } + if (i==1000) { + DRM_ERROR("failed to save current grctx to ramin\n"); + DRM_ERROR("instance = 0x%08x\n", NV_READ(0x40032C)); + DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C)); + NV_WRITE(NV_PGRAPH_FIFO, 1); + return; + } + + NV_WRITE(NV_PGRAPH_FIFO, 1); +} + +/* Restore the context for a specific channel into PGRAPH + * XXX: fails sometimes.. not sure why + */ +void +nv40_graph_context_restore(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t instance; + int i; + + instance = nouveau_chip_instance_get(dev, chan->ramin_grctx); + + NV_WRITE(NV_PGRAPH_FIFO, 0); + NV_WRITE(0x400784, instance); + NV_WRITE(0x400310, NV_READ(0x400310) | 0x40); + NV_WRITE(0x400304, 1); + /* just in case, we don't want to spin in-kernel forever */ + for (i=0; i<1000; i++) { + if (NV_READ(0x40030C) == 0) + break; + } + if (i==1000) { + DRM_ERROR("failed to restore grctx for ch%d to PGRAPH\n", + channel); + DRM_ERROR("instance = 0x%08x\n", instance); + DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C)); + NV_WRITE(NV_PGRAPH_FIFO, 1); + return; + } + + + /* 0x40032C, no idea of its exact function. Could simply be a + * record of the currently active PGRAPH context. It's currently + * unknown what bit 24 does. The nv ddx has it set, so we will + * set it here too. + */ + NV_WRITE(0x40032C, instance | 0x01000000); + /* 0x32E0 records the instance address of the active FIFO's PGRAPH + * context. If at any time this doesn't match 0x40032C, you will + * receive PGRAPH_INTR_CONTEXT_SWITCH + */ + NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, instance); + NV_WRITE(NV_PGRAPH_FIFO, 1); +} + +/* Some voodoo that makes context switching work without the binary driver + * initialising the card first. + * + * It is possible to affect how the context is saved from PGRAPH into a block + * of instance memory by altering the values in these tables. This may mean + * that the context layout of each chipset is slightly different (at least + * NV40 and C51 are different). It would also be possible for chipsets to + * have an identical context layout, but pull the data from different PGRAPH + * registers. + * + * TODO: decode the meaning of the magic values, may provide clues about the + * differences between the various NV40 chipsets. + * TODO: once we have a better idea of how each chipset differs, perhaps think + * about unifying these instead of providing a separate table for each + * chip. 
+ * + * mmio-trace dumps from other nv4x/g7x/c5x cards very welcome :) + */ +static uint32_t nv40_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406, + 0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, + 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00110205, 0x0011420a, 0x00114210, 0x00110216, + 0x0012421b, 0x00120270, 0x001242c0, 0x00200040, 0x00100280, 0x00128100, + 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, + 0x00110400, 0x00104d10, 0x00500060, 0x00403b87, 0x0060000d, 0x004076e6, + 0x002000f0, 0x0060000a, 0x00200045, 0x00100620, 0x00108668, 0x0011466b, + 0x00120682, 0x0011068b, 0x00168691, 0x0010c6ae, 0x001206b4, 0x0020002a, + 0x001006c4, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1, + 0x00500060, 0x00405600, 0x00405684, 0x00600003, 0x00500067, 0x00600008, + 0x00500060, 0x00700082, 0x0020026c, 0x0060000a, 0x00104800, 0x00104901, + 0x00120920, 0x00200035, 0x00100940, 0x00148a00, 0x00104a14, 0x00200038, + 0x00100b00, 0x00138d00, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, + 0x0020031a, 0x0060000a, 0x00300000, 0x00200680, 0x00406c00, 0x00200684, + 0x00800001, 0x00200b62, 0x0060000a, 0x0020a0b0, 0x0040728a, 0x00201b68, + 0x00800041, 0x00407684, 0x00203e60, 0x00800002, 0x00408700, 0x00600006, + 0x00700003, 0x004080e6, 0x00700080, 0x0020031a, 0x0060000a, 0x00200004, + 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a284, + 0x00700002, 0x00600004, 0x0040a268, 0x00700000, 0x00200000, 0x0060000a, + 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, + 0x00600007, 0x00409388, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, + 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, + 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, + 0x0040a406, 0x0040a505, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, + ~0 +}; + +static uint32_t nv43_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, + 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, + 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, + 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, + 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, + 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, + 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, + 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, + 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200233, 0x0060000a, + 0x00104800, 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, + 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, + 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 
0x00105c00, 0x00104f06, + 0x002002c8, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684, + 0x00800001, 0x00200b10, 0x0060000a, 0x00203870, 0x0040788a, 0x00201350, + 0x00800041, 0x00407c84, 0x00201560, 0x00800002, 0x00408d00, 0x00600006, + 0x00700003, 0x004086e6, 0x00700080, 0x002002c8, 0x0060000a, 0x00200004, + 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, + 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, + 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, + 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, + 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, + 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, + 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, + ~0 +}; + +static uint32_t nv4a_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06, + 0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, + 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, + 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1, + 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, + 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, + 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, + 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, + 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, + 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, + 0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, + 0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, + 0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, + 0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a, + 0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100, + 0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 0x0020007a, 0x0060000a, + 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, + 0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004, + 0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, + 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88, + 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000, + 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, + 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05, + 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 +}; + +static uint32_t nv4e_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, + 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, + 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 
0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, + 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, + 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, + 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, + 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, + 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, + 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, + 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, + 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, + 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x00105c00, 0x00104f06, + 0x002002b2, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084, + 0x00800001, 0x002004fa, 0x0060000a, 0x00201320, 0x0040788a, 0xfffffb06, + 0x00800029, 0x00407c84, 0x00200b20, 0x00800002, 0x00408d00, 0x00600006, + 0x00700003, 0x004086e6, 0x00700080, 0x002002b2, 0x0060000a, 0x00200004, + 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, + 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, + 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, + 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, + 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, + 0x01940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, + 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, + ~0 +}; + + +int +nv40_graph_init(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + uint32_t *ctx_voodoo; + uint32_t pg0220_inst; + int i, chipset; + + chipset = (NV_READ(NV_PMC_BOOT_0) & 0x0ff00000) >> 20; + DRM_DEBUG("chipset (from PMC_BOOT_0): NV%02X\n", chipset); + switch (chipset) { + case 0x40: ctx_voodoo = nv40_ctx_voodoo; break; + case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; + case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; + case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break; + default: + DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n", chipset); + ctx_voodoo = NULL; + break; + } + + /* Load the context voodoo onto the card */ + if (ctx_voodoo) { + DRM_DEBUG("Loading context-switch voodoo\n"); + i = 0; + + NV_WRITE(0x400324, 0); + while (ctx_voodoo[i] != ~0) { + NV_WRITE(0x400328, ctx_voodoo[i]); + i++; + } + } + + /* No context present currently */ + NV_WRITE(0x40032C, 0x00000000); + + /* No idea what this is for.. */ + dev_priv->fb_obj = nouveau_dma_object_create(dev, + 0, nouveau_mem_fb_amount(dev), + NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM); + pg0220_inst = nouveau_chip_instance_get(dev, + dev_priv->fb_obj->instance); + NV_WRITE(NV_PGRAPH_NV40_UNK220, pg0220_inst); + + return 0; +} + diff --git a/shared-core/r128_irq.c b/shared-core/r128_irq.c index de1597ec..87f8ca2b 100644 --- a/shared-core/r128_irq.c +++ b/shared-core/r128_irq.c @@ -1,5 +1,4 @@ -/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- - */ +/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */ /* * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 
* diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c index deeb67ed..17b11e7d 100644 --- a/shared-core/r128_state.c +++ b/shared-core/r128_state.c @@ -221,7 +221,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv) ADVANCE_RING(); } -static __inline__ void r128_emit_state(drm_r128_private_t * dev_priv) +static void r128_emit_state(drm_r128_private_t * dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index c65ffd59..0c04b5f8 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -242,26 +242,6 @@ static __inline__ int r300_check_range(unsigned reg, int count) return 0; } -/* - * we expect offsets passed to the framebuffer to be either within video - * memory or within AGP space - */ -static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv, - u32 offset) -{ - /* we realy want to check against end of video aperture - but this value is not being kept. - This code is correct for now (does the same thing as the - code that sets MC_FB_LOCATION) in radeon_cp.c */ - if (offset >= dev_priv->fb_location && - offset < (dev_priv->fb_location + dev_priv->fb_size)) - return 0; - if (offset >= dev_priv->gart_vm_start && - offset < (dev_priv->gart_vm_start + dev_priv->gart_size)) - return 0; - return 1; -} - static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * dev_priv, drm_radeon_kcmd_buffer_t @@ -290,7 +270,7 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * case MARK_SAFE: break; case MARK_CHECK_OFFSET: - if (r300_check_offset(dev_priv, (u32) values[i])) { + if (!radeon_check_offset(dev_priv, (u32) values[i])) { DRM_ERROR ("Offset failed range check (reg=%04x sz=%d)\n", reg, sz); @@ -452,7 +432,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, i = 1; while ((k < narrays) && (i < (count + 1))) { i++; /* skip attribute field */ - if (r300_check_offset(dev_priv, payload[i])) { + if (!radeon_check_offset(dev_priv, payload[i])) { DRM_ERROR ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); @@ -463,7 +443,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, if (k == narrays) break; /* have one more to process, they come in pairs */ - if (r300_check_offset(dev_priv, payload[i])) { + if (!radeon_check_offset(dev_priv, payload[i])) { DRM_ERROR ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); @@ -508,7 +488,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { offset = cmd[2] << 10; - ret = r300_check_offset(dev_priv, offset); + ret = !radeon_check_offset(dev_priv, offset); if (ret) { DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); return DRM_ERR(EINVAL); @@ -518,7 +498,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { offset = cmd[3] << 10; - ret = r300_check_offset(dev_priv, offset); + ret = !radeon_check_offset(dev_priv, offset); if (ret) { DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); return DRM_ERR(EINVAL); @@ -551,7 +531,7 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv, DRM_ERROR("Invalid indx_buffer reg address 
%08X\n", cmd[1]); return DRM_ERR(EINVAL); } - ret = r300_check_offset(dev_priv, cmd[2]); + ret = !radeon_check_offset(dev_priv, cmd[2]); if (ret) { DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); return DRM_ERR(EINVAL); diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 6ea2a175..5c426fe0 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -306,6 +306,21 @@ extern int radeon_no_wb; extern drm_ioctl_desc_t radeon_ioctls[]; extern int radeon_max_ioctl; +/* Check whether the given hardware address is inside the framebuffer or the + * GART area. + */ +static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv, + u64 off) +{ + u32 fb_start = dev_priv->fb_location; + u32 fb_end = fb_start + dev_priv->fb_size - 1; + u32 gart_start = dev_priv->gart_vm_start; + u32 gart_end = gart_start + dev_priv->gart_size - 1; + + return ((off >= fb_start && off <= fb_end) || + (off >= gart_start && off <= gart_end)); +} + /* radeon_cp.c */ extern int radeon_cp_init(DRM_IOCTL_ARGS); extern int radeon_cp_start(DRM_IOCTL_ARGS); diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index bf5e3d29..40b7d6ce 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -43,10 +43,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * u32 * offset) { u64 off = *offset; - u32 fb_start = dev_priv->fb_location; - u32 fb_end = fb_start + dev_priv->fb_size - 1; - u32 gart_start = dev_priv->gart_vm_start; - u32 gart_end = gart_start + dev_priv->gart_size - 1; + u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1; struct drm_radeon_driver_file_fields *radeon_priv; /* Hrm ... the story of the offset ... So this function converts @@ -66,8 +63,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * /* First, the best case, the offset already lands in either the * framebuffer or the GART mapped space */ - if ((off >= fb_start && off <= fb_end) || - (off >= gart_start && off <= gart_end)) + if (radeon_check_offset(dev_priv, off)) return 0; /* Ok, that didn't happen... 
now check if we have a zero based @@ -81,11 +77,10 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * /* Finally, assume we aimed at a GART offset if beyond the fb */ if (off > fb_end) - off = off - fb_end - 1 + gart_start; + off = off - fb_end - 1 + dev_priv->gart_vm_start; /* Now recheck and fail if out of bounds */ - if ((off >= fb_start && off <= fb_end) || - (off >= gart_start && off <= gart_end)) { + if (radeon_check_offset(dev_priv, off)) { DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off); *offset = off; return 0; diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 01121b92..5632b5c8 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -32,6 +32,8 @@ #define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ #define SAVAGE_FREELIST_DEBUG 0 +static int savage_do_cleanup_bci(drm_device_t *dev); + static int savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n) { @@ -895,7 +897,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init) return 0; } -int savage_do_cleanup_bci(drm_device_t *dev) +static int savage_do_cleanup_bci(drm_device_t *dev) { drm_savage_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h index 560f934e..88c571e1 100644 --- a/shared-core/savage_drv.h +++ b/shared-core/savage_drv.h @@ -212,7 +212,6 @@ extern int savage_driver_load(drm_device_t *dev, unsigned long chipset); extern int savage_driver_firstopen(drm_device_t *dev); extern void savage_driver_lastclose(drm_device_t *dev); extern int savage_driver_unload(drm_device_t *dev); -extern int savage_do_cleanup_bci(drm_device_t *dev); extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp); /* state functions */ diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index 5c8a43eb..acc98f89 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -993,7 +993,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) if (cmdbuf.size) { kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); if (kcmd_addr == NULL) - return ENOMEM; + return DRM_ERR(ENOMEM); if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, cmdbuf.size * 8)) diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h index d1cdc19c..006d148c 100644 --- a/shared-core/sis_drv.h +++ b/shared-core/sis_drv.h @@ -57,9 +57,9 @@ enum sis_family { typedef struct drm_sis_private { drm_local_map_t *mmio; - unsigned idle_fault; + unsigned int idle_fault; drm_sman_t sman; - unsigned long chipset; + unsigned int chipset; int vram_initialized; int agp_initialized; unsigned long vram_offset; diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c index c6e7aa77..90dbb6a2 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -489,6 +489,7 @@ static int via_hook_segment(drm_via_private_t *dev_priv, VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16)); VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); + VIA_READ(VIA_REG_TRANSPACE); } } return paused; @@ -572,8 +573,9 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv) VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); - + DRM_WRITEMEMORYBARRIER(); VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK); + VIA_READ(VIA_REG_TRANSPACE); } static void via_pad_cache(drm_via_private_t *dev_priv, int qwords) diff --git a/shared-core/via_drm.h b/shared-core/via_drm.h index ee92ff69..16421d74 100644 --- a/shared-core/via_drm.h +++ 
b/shared-core/via_drm.h @@ -42,11 +42,11 @@ * backwards incompatibilities, (which should be avoided whenever possible). */ -#define VIA_DRM_DRIVER_DATE "20060616" +#define VIA_DRM_DRIVER_DATE "20061227" #define VIA_DRM_DRIVER_MAJOR 2 -#define VIA_DRM_DRIVER_MINOR 10 -#define VIA_DRM_DRIVER_PATCHLEVEL 2 +#define VIA_DRM_DRIVER_MINOR 11 +#define VIA_DRM_DRIVER_PATCHLEVEL 0 #define VIA_DRM_DRIVER_VERSION (((VIA_DRM_DRIVER_MAJOR) << 16) | (VIA_DRM_DRIVER_MINOR)) #define VIA_NR_SAREA_CLIPRECTS 8 diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h index 18d2a331..7a8f2c34 100644 --- a/shared-core/via_drv.h +++ b/shared-core/via_drv.h @@ -83,7 +83,7 @@ typedef struct drm_via_private { char pci_buf[VIA_PCI_BUF_SIZE]; const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; uint32_t num_fire_offsets; - int pro_group_a; + int chipset; drm_via_irq_t via_irqs[VIA_NUM_IRQS]; unsigned num_irqs; maskarray_t *irq_masks; @@ -105,8 +105,9 @@ typedef struct drm_via_private { } drm_via_private_t; enum via_family { - VIA_OTHER = 0, - VIA_PRO_GROUP_A, + VIA_OTHER = 0, /* Baseline */ + VIA_PRO_GROUP_A, /* Another video engine and DMA commands */ + VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */ }; /* VIA MMIO register access */ diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c index db60b28e..2ac86970 100644 --- a/shared-core/via_irq.c +++ b/shared-core/via_irq.c @@ -267,13 +267,17 @@ void via_driver_irq_preinstall(drm_device_t * dev) dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING; - dev_priv->irq_masks = (dev_priv->pro_group_a) ? - via_pro_group_a_irqs : via_unichrome_irqs; - dev_priv->num_irqs = (dev_priv->pro_group_a) ? - via_num_pro_group_a : via_num_unichrome; - dev_priv->irq_map = (dev_priv->pro_group_a) ? 
- via_irqmap_pro_group_a : via_irqmap_unichrome; - + if (dev_priv->chipset == VIA_PRO_GROUP_A || + dev_priv->chipset == VIA_DX9_0) { + dev_priv->irq_masks = via_pro_group_a_irqs; + dev_priv->num_irqs = via_num_pro_group_a; + dev_priv->irq_map = via_irqmap_pro_group_a; + } else { + dev_priv->irq_masks = via_unichrome_irqs; + dev_priv->num_irqs = via_num_unichrome; + dev_priv->irq_map = via_irqmap_unichrome; + } + for(i=0; i < dev_priv->num_irqs; ++i) { atomic_set(&cur_irq->irq_received, 0); cur_irq->enable_mask = dev_priv->irq_masks[i][0]; diff --git a/shared-core/via_map.c b/shared-core/via_map.c index 71967d6c..a37f5fd2 100644 --- a/shared-core/via_map.c +++ b/shared-core/via_map.c @@ -107,8 +107,7 @@ int via_driver_load(drm_device_t *dev, unsigned long chipset) dev->dev_private = (void *)dev_priv; - if (chipset == VIA_PRO_GROUP_A) - dev_priv->pro_group_a = 1; + dev_priv->chipset = chipset; #ifdef VIA_HAVE_CORE_MM ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c index 1a5edd4f..b5a1d33a 100644 --- a/shared-core/via_verifier.c +++ b/shared-core/via_verifier.c @@ -311,6 +311,7 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq) unsigned long lo = ~0, hi = 0, tmp; uint32_t *addr, *pitch, *height, tex; unsigned i; + int npot; if (end > 9) end = 9; @@ -321,12 +322,15 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq) &(cur_seq->t_addr[tex = cur_seq->texture][start]); pitch = &(cur_seq->pitch[tex][start]); height = &(cur_seq->height[tex][start]); - + npot = cur_seq->tex_npot[tex]; for (i = start; i <= end; ++i) { tmp = *addr++; if (tmp < lo) lo = tmp; - tmp += (*height++ << *pitch++); + if (i == 0 && npot) + tmp += (*height++ * *pitch++); + else + tmp += (*height++ << *pitch++); if (tmp > hi) hi = tmp; } @@ -448,13 +452,21 @@ investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq) return 0; case check_texture_addr3: cur_seq->unfinished = tex_address; - tmp = ((cmd >> 24) - 0x2B); - cur_seq->pitch[cur_seq->texture][tmp] = - (cmd & 0x00F00000) >> 20; - if (!tmp && (cmd & 0x000FFFFF)) { - DRM_ERROR - ("Unimplemented texture level 0 pitch mode.\n"); - return 2; + tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit); + if (tmp == 0 && + (cmd & HC_HTXnEnPit_MASK)) { + cur_seq->pitch[cur_seq->texture][tmp] = + (cmd & HC_HTXnLnPit_MASK); + cur_seq->tex_npot[cur_seq->texture] = 1; + } else { + cur_seq->pitch[cur_seq->texture][tmp] = + (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT; + cur_seq->tex_npot[cur_seq->texture] = 0; + if (cmd & 0x000FFFFF) { + DRM_ERROR + ("Unimplemented texture level 0 pitch mode.\n"); + return 2; + } } return 0; case check_texture_addr4: @@ -966,7 +978,13 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, uint32_t cmd; const uint32_t *buf_end = buf + (size >> 2); verifier_state_t state = state_command; - int pro_group_a = dev_priv->pro_group_a; + int cme_video; + int supported_3d; + + cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A || + dev_priv->chipset == VIA_DX9_0); + + supported_3d = dev_priv->chipset != VIA_DX9_0; hc_state->dev = dev; hc_state->unfinished = no_sequence; @@ -991,17 +1009,21 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, state = via_check_vheader6(&buf, buf_end); break; case state_command: - if (HALCYON_HEADER2 == (cmd = *buf)) + if ((HALCYON_HEADER2 == (cmd = *buf)) && + supported_3d) state = state_header2; else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) state = state_header1; - 
else if (pro_group_a + else if (cme_video && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) state = state_vheader5; - else if (pro_group_a + else if (cme_video && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) state = state_vheader6; - else { + else if ((cmd == HALCYON_HEADER2) && !supported_3d) { + DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n"); + state = state_error; + } else { DRM_ERROR ("Invalid / Unimplemented DMA HEADER command. 0x%x\n", cmd); diff --git a/shared-core/via_verifier.h b/shared-core/via_verifier.h index 96708a39..84497c44 100644 --- a/shared-core/via_verifier.h +++ b/shared-core/via_verifier.h @@ -45,6 +45,7 @@ typedef struct { uint32_t tex_level_lo[2]; uint32_t tex_level_hi[2]; uint32_t tex_palette_size[2]; + uint32_t tex_npot[2]; drm_via_sequence_t unfinished; int agp_texture; int multitex; |
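In the via_verifier.c hunk above, the new tex_npot flag switches the texture-extent computation between two interpretations of the stored pitch: when HC_HTXnEnPit is set for texture level 0, the command word carries the pitch value directly and the verifier multiplies it by the height, otherwise the stored value remains a log2 exponent and is shifted as before. A minimal sketch of that distinction, using a hypothetical helper name (the exact unit of the direct pitch value is an assumption, not stated by the patch):

	static unsigned long via_tex_extent(unsigned long height, unsigned long pitch, int npot)
	{
		if (npot)
			return height * pitch;	/* NPOT: 'pitch' holds the pitch value itself (assumed bytes) */
		return height << pitch;		/* POT: 'pitch' holds a log2 exponent */
	}

Either way the result is added to the texture base address to track the highest address the texture can touch, which is the range the verifier then checks.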