Diffstat (limited to 'shared-core')
-rw-r--r--  shared-core/Makefile.am         1
-rw-r--r--  shared-core/drm_pciids.txt    230
-rw-r--r--  shared-core/drm_sarea.h         2
-rw-r--r--  shared-core/nouveau_drm.h     152
-rw-r--r--  shared-core/nouveau_drv.h     198
-rw-r--r--  shared-core/nouveau_fifo.c    658
-rw-r--r--  shared-core/nouveau_irq.c     411
-rw-r--r--  shared-core/nouveau_mem.c     553
-rw-r--r--  shared-core/nouveau_object.c  578
-rw-r--r--  shared-core/nouveau_reg.h     232
-rw-r--r--  shared-core/nouveau_state.c   203
11 files changed, 3217 insertions(+), 1 deletion(-)
diff --git a/shared-core/Makefile.am b/shared-core/Makefile.am
index cd278643..f0ebf2a3 100644
--- a/shared-core/Makefile.am
+++ b/shared-core/Makefile.am
@@ -29,6 +29,7 @@ klibdrminclude_HEADERS = \
i915_drm.h \
mach64_drm.h \
mga_drm.h \
+ nouveau_drm.h \
r128_drm.h \
radeon_drm.h \
savage_drm.h \
diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt
index c597708d..b6dfe400 100644
--- a/shared-core/drm_pciids.txt
+++ b/shared-core/drm_pciids.txt
@@ -462,3 +462,233 @@
0x10DE 0x009C NV40 "NVidia 0x009C"
0x10DE 0x009D NV40 "NVidia Quadro FX 4500"
0x10DE 0x009E NV40 "NVidia 0x009E"
+
+[nouveau]
+0x10de 0x0008 NV_03 "EDGE 3D"
+0x10de 0x0009 NV_03 "EDGE 3D"
+0x10de 0x0010 NV_03 "Mutara V08"
+0x10de 0x0020 NV_04 "RIVA TNT"
+0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro"
+0x10de 0x0029 NV_04 "RIVA TNT2 Ultra"
+0x10de 0x002a NV_04 "Riva TnT2"
+0x10de 0x002b NV_04 "Riva TnT2"
+0x10de 0x002c NV_04 "Vanta/Vanta LT"
+0x10de 0x002d NV_04 "RIVA TNT2 Model 64/Model 64 Pro"
+0x10de 0x002e NV_04 "Vanta"
+0x10de 0x002f NV_04 "Vanta"
+0x10de 0x0040 NV_40 "GeForce 6800 Ultra"
+0x10de 0x0041 NV_40 "GeForce 6800"
+0x10de 0x0042 NV_40 "GeForce 6800 LE"
+0x10de 0x0043 NV_40 "NV40.3"
+0x10de 0x0044 NV_40 "GeForce 6800 XT"
+0x10de 0x0045 NV_40 "GeForce 6800 GT"
+0x10de 0x0046 NV_40 "GeForce 6800 GT"
+0x10de 0x0047 NV_40 "GeForce 6800 GS"
+0x10de 0x0048 NV_40 "GeForce 6800 XT"
+0x10de 0x0049 NV_40 "NV40GL"
+0x10de 0x004d NV_40 "Quadro FX 4000"
+0x10de 0x004e NV_40 "Quadro FX 4000"
+0x10de 0x0090 NV_40 "GeForce 7800 GTX"
+0x10de 0x0091 NV_40 "GeForce 7800 GTX"
+0x10de 0x0092 NV_40 "GeForce 7800 GT"
+0x10de 0x0093 NV_40 "GeForce 7800 GS"
+0x10de 0x0098 NV_40 "GeForce Go 7800"
+0x10de 0x0099 NV_40 "GeForce Go 7800 GTX"
+0x10de 0x009d NV_40 "Quadro FX4500"
+0x10de 0x00a0 NV_04 "Aladdin TNT2"
+0x10de 0x00c0 NV_40 "GeForce 6800 GS"
+0x10de 0x00c1 NV_40 "GeForce 6800"
+0x10de 0x00c2 NV_40 "GeForce 6800 LE"
+0x10de 0x00c3 NV_40 "Geforce 6800 XT"
+0x10de 0x00c8 NV_40 "GeForce Go 6800"
+0x10de 0x00c9 NV_40 "GeForce Go 6800 Ultra"
+0x10de 0x00cc NV_40 "Quadro FX Go1400"
+0x10de 0x00cd NV_40 "Quadro FX 3450/4000 SDI"
+0x10de 0x00ce NV_40 "Quadro FX 1400"
+0x10de 0x00f0 NV_40 "GeForce 6800/GeForce 6800 Ultra"
+0x10de 0x00f1 NV_40 "GeForce 6600/GeForce 6600 GT"
+0x10de 0x00f2 NV_40 "GeForce 6600/GeForce 6600 GT"
+0x10de 0x00f3 NV_40 "GeForce 6200"
+0x10de 0x00f4 NV_40 "GeForce 6600 LE"
+0x10de 0x00f5 NV_40 "GeForce 7800 GS"
+0x10de 0x00f6 NV_40 "GeForce 6600 GS"
+0x10de 0x00f8 NV_40 "Quadro FX 3400/4400"
+0x10de 0x00f9 NV_40 "GeForce 6800 Ultra/GeForce 6800 GT"
+0x10de 0x00fa NV_30 "GeForce PCX 5750"
+0x10de 0x00fb NV_30 "GeForce PCX 5900"
+0x10de 0x00fc NV_30 "Quadro FX 330/GeForce PCX 5300"
+0x10de 0x00fd NV_30 "Quadro FX 330/Quadro NVS280"
+0x10de 0x00fe NV_30 "Quadro FX 1300"
+0x10de 0x00ff NV_17 "GeForce PCX 4300"
+0x10de 0x0100 NV_10 "GeForce 256 SDR"
+0x10de 0x0101 NV_10 "GeForce 256 DDR"
+0x10de 0x0103 NV_10 "Quadro"
+0x10de 0x0110 NV_11 "GeForce2 MX/MX 400"
+0x10de 0x0111 NV_11 "GeForce2 MX 100 DDR/200 DDR"
+0x10de 0x0112 NV_11 "GeForce2 Go"
+0x10de 0x0113 NV_11 "Quadro2 MXR/EX/Go"
+0x10de 0x0140 NV_40 "GeForce 6600 GT"
+0x10de 0x0141 NV_40 "GeForce 6600"
+0x10de 0x0142 NV_40 "GeForce 6600 PCIe"
+0x10de 0x0144 NV_40 "GeForce Go 6600"
+0x10de 0x0145 NV_40 "GeForce 6610 XL"
+0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE"
+0x10de 0x0148 NV_40 "GeForce Go 6600"
+0x10de 0x0149 NV_40 "GeForce Go 6600 GT"
+0x10de 0x014a NV_40 "Quadro NVS 440"
+0x10de 0x014d NV_17 "Quadro FX 550"
+0x10de 0x014e NV_40 "Quadro FX 540"
+0x10de 0x014f NV_40 "GeForce 6200"
+0x10de 0x0150 NV_15 "GeForce2 GTS/Pro"
+0x10de 0x0151 NV_15 "GeForce2 Ti"
+0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner"
+0x10de 0x0153 NV_15 "Quadro2 Pro"
+0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)"
+0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)"
+0x10de 0x0163 NV_44 "GeForce 6200 LE"
+0x10de 0x0164 NV_44 "GeForce Go 6200"
+0x10de 0x0165 NV_44 "Quadro NVS 285"
+0x10de 0x0166 NV_44 "GeForce Go 6400"
+0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache"
+0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache"
+0x10de 0x0170 NV_17 "GeForce4 MX 460"
+0x10de 0x0171 NV_17 "GeForce4 MX 440"
+0x10de 0x0172 NV_17 "GeForce4 MX 420"
+0x10de 0x0173 NV_17 "GeForce4 MX 440-SE"
+0x10de 0x0174 NV_17 "GeForce4 440 Go"
+0x10de 0x0175 NV_17 "GeForce4 420 Go"
+0x10de 0x0176 NV_17 "GeForce4 420 Go 32M"
+0x10de 0x0177 NV_17 "GeForce4 460 Go"
+0x10de 0x0178 NV_17 "Quadro4 550 XGL"
+0x10de 0x0179 NV_17 "GeForce4 420 Go 32M"
+0x10de 0x017a NV_17 "Quadro4 200/400 NVS"
+0x10de 0x017b NV_17 "Quadro4 550 XGL"
+0x10de 0x017c NV_17 "Quadro4 500 GoGL"
+0x10de 0x017d NV_17 "GeForce4 410 Go 16M"
+0x10de 0x0181 NV_17 "GeForce4 MX 440 AGP 8x"
+0x10de 0x0182 NV_17 "GeForce4 MX 440SE AGP 8x"
+0x10de 0x0183 NV_17 "GeForce4 MX 420 AGP 8x"
+0x10de 0x0185 NV_17 "GeForce4 MX 4000 AGP 8x"
+0x10de 0x0186 NV_17 "GeForce4 448 Go"
+0x10de 0x0187 NV_17 "GeForce4 488 Go"
+0x10de 0x0188 NV_17 "Quadro4 580 XGL"
+0x10de 0x018a NV_17 "Quadro4 NVS AGP 8x"
+0x10de 0x018b NV_17 "Quadro4 380 XGL"
+0x10de 0x018c NV_17 "Quadro NVS 50 PCI"
+0x10de 0x018d NV_17 "GeForce4 448 Go"
+0x10de 0x0191 NV_50 "GeForce 8800 GTX"
+0x10de 0x0193 NV_50 "GeForce 8800 GTS"
+0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics"
+0x10de 0x01d1 NV_44 "GeForce 7300 LE"
+0x10de 0x01d6 NV_44 "GeForce Go 7200"
+0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300"
+0x10de 0x01d8 NV_44 "GeForce Go 7400"
+0x10de 0x01da NV_44 "Quadro NVS 110M"
+0x10de 0x01df NV_44 "GeForce 7300 GS"
+0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU"
+0x10de 0x0200 NV_20 "GeForce3"
+0x10de 0x0201 NV_20 "GeForce3 Ti 200"
+0x10de 0x0202 NV_20 "GeForce3 Ti 500"
+0x10de 0x0203 NV_20 "Quadro DCC"
+0x10de 0x0211 NV_40 "GeForce 6800"
+0x10de 0x0212 NV_40 "GeForce 6800 LE"
+0x10de 0x0215 NV_40 "GeForce 6800 GT"
+0x10de 0x0218 NV_40 "GeForce 6800 XT"
+0x10de 0x0221 NV_44 "GeForce 6200"
+0x10de 0x0240 NV_44 "GeForce 6150"
+0x10de 0x0242 NV_44 "GeForce 6100"
+0x10de 0x0250 NV_25 "GeForce4 Ti 4600"
+0x10de 0x0251 NV_25 "GeForce4 Ti 4400"
+0x10de 0x0252 NV_25 "GeForce4 Ti"
+0x10de 0x0253 NV_25 "GeForce4 Ti 4200"
+0x10de 0x0258 NV_25 "Quadro4 900 XGL"
+0x10de 0x0259 NV_25 "Quadro4 750 XGL"
+0x10de 0x025b NV_25 "Quadro4 700 XGL"
+0x10de 0x0280 NV_25 "GeForce4 Ti 4800"
+0x10de 0x0281 NV_25 "GeForce4 Ti 4200 AGP 8x"
+0x10de 0x0282 NV_25 "GeForce4 Ti 4800 SE"
+0x10de 0x0286 NV_25 "GeForce4 Ti 4200 Go AGP 8x"
+0x10de 0x0288 NV_25 "Quadro4 980 XGL"
+0x10de 0x0289 NV_25 "Quadro4 780 XGL"
+0x10de 0x028c NV_25 "Quadro4 700 GoGL"
+0x10de 0x0290 NV_40 "GeForce 7900 GTX"
+0x10de 0x0291 NV_40 "GeForce 7900 GT"
+0x10de 0x0292 NV_40 "GeForce 7900 GS"
+0x10de 0x0298 NV_40 "GeForce Go 7900 GS"
+0x10de 0x0299 NV_40 "GeForce Go 7900 GTX"
+0x10de 0x029a NV_40 "Quadro FX 2500M"
+0x10de 0x029b NV_40 "Quadro FX 1500M"
+0x10de 0x029c NV_40 "Quadro FX 5500"
+0x10de 0x029d NV_40 "Quadro FX 3500"
+0x10de 0x029e NV_40 "Quadro FX 1500"
+0x10de 0x029f NV_40 "Quadro FX 4500 X2"
+0x10de 0x02a0 NV_20 "XGPU"
+0x10de 0x02e1 NV_40 "GeForce 7600 GS"
+0x10de 0x0300 NV_30 "GeForce FX"
+0x10de 0x0301 NV_30 "GeForce FX 5800 Ultra"
+0x10de 0x0302 NV_30 "GeForce FX 5800"
+0x10de 0x0308 NV_30 "Quadro FX 2000"
+0x10de 0x0309 NV_30 "Quadro FX 1000"
+0x10de 0x0311 NV_30 "GeForce FX 5600 Ultra"
+0x10de 0x0312 NV_30 "GeForce FX 5600"
+0x10de 0x0313 NV_30 "NV31"
+0x10de 0x0314 NV_30 "GeForce FX 5600XT"
+0x10de 0x0316 NV_30 "NV31M"
+0x10de 0x0317 NV_30 "NV31M Pro"
+0x10de 0x031a NV_30 "GeForce FX Go5600"
+0x10de 0x031b NV_30 "GeForce FX Go5650"
+0x10de 0x031d NV_30 "NV31GLM"
+0x10de 0x031e NV_30 "NV31GLM Pro"
+0x10de 0x031f NV_30 "NV31GLM Pro"
+0x10de 0x0320 NV_34 "GeForce FX 5200"
+0x10de 0x0321 NV_34 "GeForce FX 5200 Ultra"
+0x10de 0x0322 NV_34 "GeForce FX 5200"
+0x10de 0x0323 NV_34 "GeForce FX 5200LE"
+0x10de 0x0324 NV_34 "GeForce FX Go5200"
+0x10de 0x0325 NV_34 "GeForce FX Go5250"
+0x10de 0x0326 NV_34 "GeForce FX 5500"
+0x10de 0x0327 NV_34 "GeForce FX 5100"
+0x10de 0x0328 NV_34 "GeForce FX Go5200 32M/64M"
+0x10de 0x0329 NV_34 "GeForce FX Go5200"
+0x10de 0x032a NV_34 "Quadro NVS 280 PCI"
+0x10de 0x032b NV_34 "Quadro FX 500/600 PCI"
+0x10de 0x032c NV_34 "GeForce FX Go 5300"
+0x10de 0x032d NV_34 "GeForce FX Go5100"
+0x10de 0x032f NV_34 "NV34GL"
+0x10de 0x0330 NV_30 "GeForce FX 5900 Ultra"
+0x10de 0x0331 NV_30 "GeForce FX 5900"
+0x10de 0x0332 NV_30 "GeForce FX 5900XT"
+0x10de 0x0333 NV_30 "GeForce FX 5950 Ultra"
+0x10de 0x0334 NV_30 "GeForce FX 5900ZT"
+0x10de 0x0338 NV_30 "Quadro FX 3000"
+0x10de 0x033f NV_30 "Quadro FX 700"
+0x10de 0x0341 NV_30 "GeForce FX 5700 Ultra"
+0x10de 0x0342 NV_30 "GeForce FX 5700"
+0x10de 0x0343 NV_30 "GeForce FX 5700LE"
+0x10de 0x0344 NV_30 "GeForce FX 5700VE"
+0x10de 0x0345 NV_30 "NV36.5"
+0x10de 0x0347 NV_30 "GeForce FX Go5700"
+0x10de 0x0348 NV_30 "GeForce FX Go5700"
+0x10de 0x0349 NV_30 "NV36M Pro"
+0x10de 0x034b NV_30 "NV36MAP"
+0x10de 0x034c NV_30 "Quadro FX Go1000"
+0x10de 0x034e NV_30 "Quadro FX 1100"
+0x10de 0x034f NV_30 "NV36GL"
+0x10de 0x0391 NV_40 "GeForce 7600 GT"
+0x10de 0x0392 NV_40 "GeForce 7600 GS"
+0x10de 0x0393 NV_40 "GeForce 7300 GT"
+0x10de 0x0398 NV_40 "GeForce Go 7600"
+0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430"
+0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405"
+0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400"
+0x10de 0x03d5 NV_44 "GeForce 6100 nForce 420"
+0x12d2 0x0008 NV_03 "NV1"
+0x12d2 0x0009 NV_03 "DAC64"
+0x12d2 0x0018 NV_03 "Riva128"
+0x12d2 0x0019 NV_03 "Riva128ZX"
+0x12d2 0x0020 NV_04 "TNT"
+0x12d2 0x0028 NV_04 "TNT2"
+0x12d2 0x0029 NV_04 "UTNT2"
+0x12d2 0x002c NV_04 "VTNT2"
+0x12d2 0x00a0 NV_04 "ITNT2"
+
diff --git a/shared-core/drm_sarea.h b/shared-core/drm_sarea.h
index 0d5baf69..43d1114f 100644
--- a/shared-core/drm_sarea.h
+++ b/shared-core/drm_sarea.h
@@ -41,7 +41,7 @@
#define SAREA_MAX 0x10000 /* 64kB */
#else
/* Intel 830M driver needs at least 8k SAREA */
-#define SAREA_MAX 0x2000
+#define SAREA_MAX 0x2000UL
#endif
/** Maximum number of drawables in the SAREA */
diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h
new file mode 100644
index 00000000..3f363192
--- /dev/null
+++ b/shared-core/nouveau_drm.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRM_H__
+#define __NOUVEAU_DRM_H__
+
+typedef struct drm_nouveau_fifo_alloc {
+ int channel;
+ uint32_t put_base;
+ /* FIFO control regs */
+ drm_handle_t ctrl;
+ int ctrl_size;
+ /* DMA command buffer */
+ drm_handle_t cmdbuf;
+ int cmdbuf_size;
+}
+drm_nouveau_fifo_alloc_t;
+
+#define NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND 0x1
+#define NV_DMA_CONTEXT_FLAGS_PATCH_SRCCOPY 0x2
+#define NV_DMA_CONTEXT_FLAGS_CLIP_ENABLE 0x4
+#define NV_DMA_CONTEXT_FLAGS_MONO 0x8
+
+typedef struct drm_nouveau_object_init {
+ uint32_t handle;
+ int class;
+ uint32_t flags;
+ /* these are object handles */
+ uint32_t dma0;
+ uint32_t dma1;
+ uint32_t dma_notifier;
+}
+drm_nouveau_object_init_t;
+
+typedef struct drm_nouveau_dma_object_init {
+ uint32_t handle;
+ int access;
+ int target;
+ uint32_t offset;
+ int size;
+}
+drm_nouveau_dma_object_init_t;
+
+#define NOUVEAU_MEM_FB 0x00000001
+#define NOUVEAU_MEM_AGP 0x00000002
+#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004
+#define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008
+#define NOUVEAU_MEM_PINNED 0x00000010
+#define NOUVEAU_MEM_USER_BACKED 0x00000020
+#define NOUVEAU_MEM_MAPPED 0x00000040
+#define NOUVEAU_MEM_INSTANCE 0x00000080 /* internal */
+
+typedef struct drm_nouveau_mem_alloc {
+ int flags;
+ int alignment;
+ uint64_t size; // in bytes
+ uint64_t region_offset;
+}
+drm_nouveau_mem_alloc_t;
+
+typedef struct drm_nouveau_mem_free {
+ int flags;
+ uint64_t region_offset;
+}
+drm_nouveau_mem_free_t;
+
+/* FIXME : maybe unify {GET,SET}PARAMs */
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
+#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
+typedef struct drm_nouveau_getparam {
+ unsigned int param;
+ uint64_t value;
+}
+drm_nouveau_getparam_t;
+
+#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1
+#define NOUVEAU_SETPARAM_CMDBUF_SIZE 2
+typedef struct drm_nouveau_setparam {
+ unsigned int param;
+ unsigned int value;
+}
+drm_nouveau_setparam_t;
+
+enum nouveau_card_type {
+ NV_UNKNOWN =0,
+ NV_01 =1,
+ NV_03 =3,
+ NV_04 =4,
+ NV_05 =5,
+ NV_10 =10,
+ NV_11 =10,
+ NV_15 =10,
+ NV_17 =10,
+ NV_20 =20,
+ NV_25 =20,
+ NV_30 =30,
+ NV_34 =30,
+ NV_40 =40,
+ NV_44 =44,
+ NV_50 =50,
+ NV_LAST =0xffff,
+};
+
+enum nouveau_bus_type {
+ NV_AGP =0,
+ NV_PCI =1,
+ NV_PCIE =2,
+};
+
+#define NOUVEAU_MAX_SAREA_CLIPRECTS 16
+
+typedef struct drm_nouveau_sarea {
+ /* the cliprects */
+ drm_clip_rect_t boxes[NOUVEAU_MAX_SAREA_CLIPRECTS];
+ unsigned int nbox;
+}
+drm_nouveau_sarea_t;
+
+#define DRM_NOUVEAU_FIFO_ALLOC 0x00
+#define DRM_NOUVEAU_OBJECT_INIT 0x01
+#define DRM_NOUVEAU_DMA_OBJECT_INIT 0x02 // We don't want this eventually..
+#define DRM_NOUVEAU_MEM_ALLOC 0x03
+#define DRM_NOUVEAU_MEM_FREE 0x04
+#define DRM_NOUVEAU_GETPARAM 0x05
+#define DRM_NOUVEAU_SETPARAM 0x06
+
+#endif /* __NOUVEAU_DRM_H__ */
+
diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h
new file mode 100644
index 00000000..4dff0c59
--- /dev/null
+++ b/shared-core/nouveau_drv.h
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRV_H__
+#define __NOUVEAU_DRV_H__
+
+#define DRIVER_AUTHOR "Stephane Marchesin"
+#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net"
+
+#define DRIVER_NAME "nouveau"
+#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
+#define DRIVER_DATE "20060213"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 1
+
+#define NOUVEAU_FAMILY 0x0000FFFF
+#define NOUVEAU_FLAGS 0xFFFF0000
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+
+enum nouveau_flags {
+ NV_NFORCE =0x10000000,
+ NV_NFORCE2 =0x20000000
+};
+
+struct nouveau_object
+{
+ struct nouveau_object *next;
+ struct nouveau_object *prev;
+
+ struct mem_block *instance;
+ uint32_t ht_loc;
+
+ uint32_t handle;
+ int class;
+ int engine;
+};
+
+#define NV_DMA_TARGET_VIDMEM 0
+#define NV_DMA_TARGET_PCI 2
+#define NV_DMA_TARGET_AGP 3
+struct nouveau_fifo
+{
+ int used;
+ /* owner of this fifo */
+ DRMFILE filp;
+ /* mapping of the fifo itself */
+ drm_local_map_t *map;
+	/* mapping of the regs controlling the fifo */
+ drm_local_map_t *regs;
+ /* dma object for the command buffer itself */
+ struct nouveau_object *cmdbuf_obj;
+ /* objects belonging to this fifo */
+ struct nouveau_object *objs;
+};
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ uint64_t start;
+ uint64_t size;
+ DRMFILE filp; /* 0: free, -1: heap, other: real files */
+ int flags;
+ drm_local_map_t *map;
+};
+
+struct nouveau_config {
+ struct {
+ int location;
+ int size;
+ } cmdbuf;
+};
+
+typedef struct drm_nouveau_private {
+ /* the card type, takes NV_* as values */
+ int card_type;
+ int flags;
+
+ drm_local_map_t *mmio;
+ drm_local_map_t *fb;
+
+ //TODO: Remove me, I'm bogus :)
+ int cur_fifo;
+
+ struct nouveau_object *fb_obj;
+ int cmdbuf_ch_size;
+ struct mem_block* cmdbuf_alloc;
+
+ int fifo_alloc_count;
+ struct nouveau_fifo fifos[NV_MAX_FIFO_NUMBER];
+
+	/* RAMHT, RAMFC and RAMRO offsets */
+ uint32_t ramht_offset;
+ uint32_t ramht_size;
+ uint32_t ramht_bits;
+ uint32_t ramfc_offset;
+ uint32_t ramfc_size;
+ uint32_t ramro_offset;
+ uint32_t ramro_size;
+
+	/* base physical addresses */
+ uint64_t fb_phys;
+ uint64_t agp_phys;
+
+ struct mem_block *agp_heap;
+ struct mem_block *fb_heap;
+ struct mem_block *fb_nomap_heap;
+ struct mem_block *ramin_heap;
+
+ struct nouveau_config config;
+}
+drm_nouveau_private_t;
+
+/* nouveau_state.c */
+extern void nouveau_preclose(drm_device_t * dev, DRMFILE filp);
+extern int nouveau_load(struct drm_device *dev, unsigned long flags);
+extern int nouveau_firstopen(struct drm_device *dev);
+extern int nouveau_unload(struct drm_device *dev);
+extern int nouveau_ioctl_getparam(DRM_IOCTL_ARGS);
+extern int nouveau_ioctl_setparam(DRM_IOCTL_ARGS);
+extern void nouveau_wait_for_idle(struct drm_device *dev);
+
+/* nouveau_mem.c */
+extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev);
+extern void nouveau_mem_release(DRMFILE filp, struct mem_block *heap);
+extern int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS);
+extern int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS);
+extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp);
+extern void nouveau_mem_free(struct drm_device* dev, struct mem_block*);
+extern int nouveau_mem_init(struct drm_device *dev);
+extern void nouveau_mem_close(struct drm_device *dev);
+extern int nouveau_instmem_init(struct drm_device *dev,
+ uint32_t offset, uint32_t size);
+extern struct mem_block* nouveau_instmem_alloc(struct drm_device *dev,
+ uint32_t size, uint32_t align);
+extern void nouveau_instmem_free(struct drm_device *dev,
+ struct mem_block *block);
+
+/* nouveau_fifo.c */
+extern int nouveau_fifo_init(drm_device_t *dev);
+extern int nouveau_fifo_number(drm_device_t *dev);
+extern void nouveau_fifo_cleanup(drm_device_t *dev, DRMFILE filp);
+extern int nouveau_fifo_id_get(drm_device_t *dev, DRMFILE filp);
+
+/* nouveau_object.c */
+extern void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp);
+extern struct nouveau_object *nouveau_dma_object_create(drm_device_t *dev,
+ uint32_t offset, uint32_t size, int access, uint32_t target);
+extern int nouveau_ioctl_object_init(DRM_IOCTL_ARGS);
+extern int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS);
+extern uint32_t nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem);
+
+/* nouveau_irq.c */
+extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void nouveau_irq_preinstall(drm_device_t*);
+extern void nouveau_irq_postinstall(drm_device_t*);
+extern void nouveau_irq_uninstall(drm_device_t*);
+
+extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
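+/* MMIO register accessors: PowerPC uses explicit big-endian accessors,
+ * everything else goes through the generic DRM 32-bit read/write macros.
+ */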
+#if defined(__powerpc__)
+#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) )
+#define NV_WRITE(reg,val) out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) )
+#else
+#define NV_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
+#define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
+#endif
+
+#define INSTANCE_WR(mem,ofs,val) NV_WRITE(NV_RAMIN+(uint32_t)(mem)->start+((ofs)<<2),(val))
+#define INSTANCE_RD(mem,ofs) NV_READ(NV_RAMIN+(uint32_t)(mem)->start+((ofs)<<2))
+
+#endif /* __NOUVEAU_DRV_H__ */
+
diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c
new file mode 100644
index 00000000..a611e438
--- /dev/null
+++ b/shared-core/nouveau_fifo.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright 2005-2006 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+
+/* returns the number of hw fifos */
+int nouveau_fifo_number(drm_device_t* dev)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ switch(dev_priv->card_type)
+ {
+ case NV_03:
+ return 8;
+ case NV_04:
+ case NV_05:
+ return 16;
+ default:
+ return 32;
+ }
+}
+
+/* returns the size of fifo context */
+static int nouveau_fifo_ctx_size(drm_device_t* dev)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+
+ if (dev_priv->card_type >= NV_40)
+ return 128;
+ else if (dev_priv->card_type >= NV_10)
+ return 64;
+ else
+ return 32;
+}
+
+/***********************************
+ * functions doing the actual work
+ ***********************************/
+
+/* see nv_xaa.c : NVResetGraphics
+ * memory mapped by nv_driver.c : NVMapMem
+ * see nv_driver.c : NVPreInit
+ */
+
+static int nouveau_fifo_instmem_configure(drm_device_t *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ uint32_t obj_base, obj_size;
+ int i;
+
+ /* Clear RAMIN */
+ for (i=0x00710000; i<0x00800000; i++)
+ NV_WRITE(i, 0x00000000);
+
+ /* FIFO hash table (RAMHT)
+ * use 4k hash table at RAMIN+0x10000
+ * TODO: extend the hash table
+ */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ NV_WRITE(NV_PFIFO_RAMHT,
+ (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht_bits - 9) << 16) |
+ (dev_priv->ramht_offset >> 8)
+ );
+ DRM_DEBUG("RAMHT offset=0x%x, size=%d\n",
+ dev_priv->ramht_offset,
+ dev_priv->ramht_size);
+
+	/* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
+ dev_priv->ramro_offset = 0x11200;
+ dev_priv->ramro_size = 512;
+ NV_WRITE(NV_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+ DRM_DEBUG("RAMRO offset=0x%x, size=%d\n",
+ dev_priv->ramro_offset,
+ dev_priv->ramro_size);
+
+ /* FIFO context table (RAMFC)
+ * NV40 : Not sure exactly how to position RAMFC on some cards,
+ * 0x30002 seems to position it at RAMIN+0x20000 on these
+ * cards. RAMFC is 4kb (32 fifos, 128byte entries).
+ * Others: Position RAMFC at RAMIN+0x11400
+ */
+ switch(dev_priv->card_type)
+ {
+ case NV_50:
+ case NV_40:
+ dev_priv->ramfc_offset = 0x20000;
+ dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev);
+ NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
+ break;
+ case NV_44:
+ dev_priv->ramfc_offset = 0x20000;
+ dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev);
+ NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
+ (2 << 16));
+ break;
+ case NV_30:
+ case NV_20:
+ case NV_10:
+ dev_priv->ramfc_offset = 0x11400;
+ dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev);
+ NV_WRITE(NV_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) |
+ (1 << 16) /* 64 Bytes entry*/);
+ break;
+ case NV_04:
+ case NV_03:
+ dev_priv->ramfc_offset = 0x11400;
+ dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev);
+ NV_WRITE(NV_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
+ break;
+ }
+ DRM_DEBUG("RAMFC offset=0x%x, size=%d\n",
+ dev_priv->ramfc_offset,
+ dev_priv->ramfc_size);
+
+ obj_base = dev_priv->ramfc_offset + dev_priv->ramfc_size;
+ obj_size = (512*1024) - obj_base; /*XXX: probably wrong on some cards*/
+ if (nouveau_instmem_init(dev, obj_base, obj_size))
+ return 1;
+ DRM_DEBUG("RAMIN object space: offset=0x%08x, size=%dKiB\n",
+ obj_base, obj_size>>10);
+
+ return 0;
+}
+
+int nouveau_fifo_init(drm_device_t *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ NV_WRITE(NV_PFIFO_CACHES, 0x00000000);
+
+ ret = nouveau_fifo_instmem_configure(dev);
+ if (ret) {
+ DRM_ERROR("Failed to configure instance memory\n");
+ return ret;
+ }
+
+ /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */
+
+ DRM_DEBUG("Setting defaults for remaining PFIFO regs\n");
+
+ /* All channels into PIO mode */
+ NV_WRITE(NV_PFIFO_MODE, 0x00000000);
+
+ NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000000);
+ NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000);
+ /* Channel 0 active, PIO mode */
+ NV_WRITE(NV_PFIFO_CACH1_PSH1, 0x00000000);
+ /* PUT and GET to 0 */
+ NV_WRITE(NV_PFIFO_CACH1_DMAP, 0x00000000);
+	NV_WRITE(NV_PFIFO_CACH1_DMAG, 0x00000000);
+ /* No cmdbuf object */
+ NV_WRITE(NV_PFIFO_CACH1_DMAI, 0x00000000);
+ NV_WRITE(NV_PFIFO_CACH0_PSH0, 0x00000000);
+ NV_WRITE(NV_PFIFO_CACH0_PUL0, 0x00000000);
+ NV_WRITE(NV_PFIFO_SIZE, 0x0000FFFF);
+ NV_WRITE(NV_PFIFO_CACH1_HASH, 0x0000FFFF);
+ NV_WRITE(NV_PFIFO_CACH0_PUL1, 0x00000001);
+ NV_WRITE(NV_PFIFO_CACH1_DMAC, 0x00000000);
+ NV_WRITE(NV_PFIFO_CACH1_DMAS, 0x00000000);
+ NV_WRITE(NV_PFIFO_CACH1_ENG, 0x00000000);
+#ifdef __BIG_ENDIAN
+ NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES |
+ NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES |
+ NV_PFIFO_CACH1_DMAF_MAX_REQS_4 |
+ NV_PFIFO_CACH1_BIG_ENDIAN);
+#else
+ NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES |
+ NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES |
+ NV_PFIFO_CACH1_DMAF_MAX_REQS_4);
+#endif
+ NV_WRITE(NV_PFIFO_CACH1_DMAPSH, 0x00000001);
+ NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000001);
+ NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001);
+ NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001);
+
+ NV_WRITE(NV_PGRAPH_CTX_USER, 0x0);
+ NV_WRITE(NV_PFIFO_DELAY_0, 0xff /* retrycount*/ );
+ if (dev_priv->card_type >= NV_40)
+ NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x00002001);
+ else
+ NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10110000);
+
+ NV_WRITE(NV_PFIFO_DMA_TIMESLICE, 0x001fffff);
+ NV_WRITE(NV_PFIFO_CACHES, 0x00000001);
+
+ return 0;
+}
+
+static int nouveau_dma_init(struct drm_device *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct nouveau_config *config = &dev_priv->config;
+ struct mem_block *cb;
+ int cb_min_size = nouveau_fifo_number(dev) * max(NV03_FIFO_SIZE,PAGE_SIZE);
+
+ /* Defaults for unconfigured values */
+ if (!config->cmdbuf.location)
+ config->cmdbuf.location = NOUVEAU_MEM_FB;
+ if (!config->cmdbuf.size || config->cmdbuf.size < cb_min_size)
+ config->cmdbuf.size = cb_min_size;
+
+ cb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
+ config->cmdbuf.location, (DRMFILE)-2);
+ /* Try defaults if that didn't succeed */
+ if (!cb) {
+ config->cmdbuf.location = NOUVEAU_MEM_FB;
+ config->cmdbuf.size = cb_min_size;
+ cb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
+ config->cmdbuf.location, (DRMFILE)-2);
+ }
+ if (!cb) {
+ DRM_ERROR("Couldn't allocate DMA command buffer.\n");
+ return DRM_ERR(ENOMEM);
+ }
+
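+	/* each hardware channel gets an equal slice of the command buffer */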
+ dev_priv->cmdbuf_ch_size = (uint32_t)cb->size / nouveau_fifo_number(dev);
+ dev_priv->cmdbuf_alloc = cb;
+
+ DRM_INFO("DMA command buffer is %dKiB at 0x%08x(%s)\n",
+ (uint32_t)cb->size>>10, (uint32_t)cb->start,
+ config->cmdbuf.location == NOUVEAU_MEM_FB ? "VRAM" : "AGP");
+ DRM_INFO("FIFO size is %dKiB\n", dev_priv->cmdbuf_ch_size>>10);
+
+ return 0;
+}
+
+static void nouveau_context_init(drm_device_t *dev,
+ drm_nouveau_fifo_alloc_t *init)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct nouveau_object *cb_obj;
+ uint32_t ctx_addr, ctx_size = 32;
+ int i;
+
+ cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj;
+
+ ctx_addr=NV_RAMIN+dev_priv->ramfc_offset+init->channel*ctx_size;
+ // clear the fifo context
+ for(i=0;i<ctx_size/4;i++)
+ NV_WRITE(ctx_addr+4*i,0x0);
+
+ NV_WRITE(ctx_addr,init->put_base);
+ NV_WRITE(ctx_addr+4,init->put_base);
+ // that's what is done in nvosdk, but that part of the code is buggy so...
+ NV_WRITE(ctx_addr+8, nouveau_chip_instance_get(dev, cb_obj->instance));
+#ifdef __BIG_ENDIAN
+ NV_WRITE(ctx_addr+16,NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4|NV_PFIFO_CACH1_BIG_ENDIAN);
+#else
+ NV_WRITE(ctx_addr+16,NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4);
+#endif
+}
+
+#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV10_RAMFC_##offset, (val))
+static void nouveau_nv10_context_init(drm_device_t *dev,
+ drm_nouveau_fifo_alloc_t *init)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct nouveau_object *cb_obj;
+ uint32_t fifoctx;
+ int i;
+
+ cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj;
+ fifoctx = NV_RAMIN + dev_priv->ramfc_offset + init->channel*64;
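+	/* NV10-NV3x RAMFC entries are 64 bytes per channel */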
+ for (i=0;i<64;i+=4)
+ NV_WRITE(fifoctx + i, 0);
+
+	/* Fill entries that are seen filled in dumps of the nvidia driver just
+	 * after the channel is put into DMA mode
+ */
+ RAMFC_WR(DMA_PUT , init->put_base);
+ RAMFC_WR(DMA_GET , init->put_base);
+ RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev,
+ cb_obj->instance));
+#ifdef __BIG_ENDIAN
+ RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES |
+ NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES |
+ NV_PFIFO_CACH1_DMAF_MAX_REQS_4 |
+ NV_PFIFO_CACH1_BIG_ENDIAN);
+#else
+ RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES |
+ NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES |
+ NV_PFIFO_CACH1_DMAF_MAX_REQS_4);
+#endif
+ RAMFC_WR(DMA_SUBROUTINE, 0);
+}
+
+static void nouveau_nv10_context_save(drm_device_t *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ uint32_t fifoctx;
+ int channel;
+
+ channel = NV_READ(NV_PFIFO_CACH1_PSH1) & (nouveau_fifo_number(dev)-1);
+ fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64;
+
+ RAMFC_WR(DMA_PUT , NV_READ(NV_PFIFO_CACH1_DMAP));
+ RAMFC_WR(DMA_GET , NV_READ(NV_PFIFO_CACH1_DMAG));
+ RAMFC_WR(REF_CNT , NV_READ(NV_PFIFO_CACH1_REF_CNT));
+ RAMFC_WR(DMA_INSTANCE , NV_READ(NV_PFIFO_CACH1_DMAI));
+ RAMFC_WR(DMA_STATE , NV_READ(NV_PFIFO_CACH1_DMAS));
+ RAMFC_WR(DMA_FETCH , NV_READ(NV_PFIFO_CACH1_DMAF));
+ RAMFC_WR(ENGINE , NV_READ(NV_PFIFO_CACH1_ENG));
+ RAMFC_WR(PULL1_ENGINE , NV_READ(NV_PFIFO_CACH1_PUL1));
+ RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV_PFIFO_CACH1_ACQUIRE_VALUE));
+ RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMESTAMP));
+ RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMEOUT));
+ RAMFC_WR(SEMAPHORE , NV_READ(NV_PFIFO_CACH1_SEMAPHORE));
+ RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV_PFIFO_CACH1_DMASR));
+}
+#undef RAMFC_WR
+
+#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val))
+static void nouveau_nv40_context_init(drm_device_t *dev,
+ drm_nouveau_fifo_alloc_t *init)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct nouveau_object *cb_obj;
+ uint32_t fifoctx;
+ int i;
+
+ cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj;
+ fifoctx = NV_RAMIN + dev_priv->ramfc_offset + init->channel*128;
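+	/* NV40 and later use 128-byte RAMFC entries per channel */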
+ for (i=0;i<128;i+=4)
+ NV_WRITE(fifoctx + i, 0);
+
+	/* Fill entries that are seen filled in dumps of the nvidia driver just
+	 * after the channel is put into DMA mode
+ */
+ RAMFC_WR(DMA_PUT , init->put_base);
+ RAMFC_WR(DMA_GET , init->put_base);
+ RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev,
+ cb_obj->instance));
+ RAMFC_WR(DMA_FETCH , 0x30086078);
+ RAMFC_WR(DMA_SUBROUTINE, init->put_base);
+ RAMFC_WR(GRCTX_INSTANCE, 0); /* XXX */
+ RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF);
+}
+
+static void nouveau_nv40_context_save(drm_device_t *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ uint32_t fifoctx;
+ int channel;
+
+ channel = NV_READ(NV_PFIFO_CACH1_PSH1) & (nouveau_fifo_number(dev)-1);
+ fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128;
+
+ RAMFC_WR(DMA_PUT , NV_READ(NV_PFIFO_CACH1_DMAP));
+ RAMFC_WR(DMA_GET , NV_READ(NV_PFIFO_CACH1_DMAG));
+ RAMFC_WR(REF_CNT , NV_READ(NV_PFIFO_CACH1_REF_CNT));
+ RAMFC_WR(DMA_INSTANCE , NV_READ(NV_PFIFO_CACH1_DMAI));
+ RAMFC_WR(DMA_DCOUNT , NV_READ(NV_PFIFO_CACH1_DMA_DCOUNT));
+ RAMFC_WR(DMA_STATE , NV_READ(NV_PFIFO_CACH1_DMAS));
+ //fetch
+ RAMFC_WR(ENGINE , NV_READ(NV_PFIFO_CACH1_ENG));
+ RAMFC_WR(PULL1_ENGINE , NV_READ(NV_PFIFO_CACH1_PUL1));
+ RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV_PFIFO_CACH1_ACQUIRE_VALUE));
+ RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMESTAMP));
+ RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMEOUT));
+ RAMFC_WR(SEMAPHORE , NV_READ(NV_PFIFO_CACH1_SEMAPHORE));
+ RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV_PFIFO_CACH1_DMAG));
+ RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE));
+ RAMFC_WR(DMA_TIMESLICE , NV_READ(NV_PFIFO_DMA_TIMESLICE) & 0x1FFFF);
+ RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4));
+}
+#undef RAMFC_WR
+
+/* allocates and initializes a fifo for user space consumption */
+static int nouveau_fifo_alloc(drm_device_t* dev,drm_nouveau_fifo_alloc_t* init, DRMFILE filp)
+{
+ int i;
+ int ret;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct nouveau_object *cb_obj;
+
+ /* Init cmdbuf on first FIFO init, this is delayed until now to
+ * give the ddx a chance to configure the cmdbuf with SETPARAM
+ */
+ if (!dev_priv->cmdbuf_alloc) {
+ ret = nouveau_dma_init(dev);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Alright, here is the full story
+ * Nvidia cards have multiple hw fifo contexts (praise them for that,
+ * no complicated crash-prone context switches)
+ * We allocate a new context for each app and let it write to it directly
+ * (woo, full userspace command submission !)
+ * When there are no more contexts, you lost
+ */
+ for(i=0;i<nouveau_fifo_number(dev);i++)
+ if (dev_priv->fifos[i].used==0)
+ break;
+
+ DRM_INFO("Allocating FIFO number %d\n", i);
+ /* no more fifos. you lost. */
+ if (i==nouveau_fifo_number(dev))
+ return DRM_ERR(EINVAL);
+
+ /* allocate a dma object for the command buffer */
+ if (dev_priv->cmdbuf_alloc->flags & NOUVEAU_MEM_AGP) {
+ cb_obj = nouveau_dma_object_create(dev,
+ dev_priv->cmdbuf_alloc->start,
+ dev_priv->cmdbuf_alloc->size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_AGP);
+
+ } else if (dev_priv->card_type != NV_04) {
+ cb_obj = nouveau_dma_object_create(dev,
+ dev_priv->cmdbuf_alloc->start -
+ drm_get_resource_start(dev, 1),
+ dev_priv->cmdbuf_alloc->size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_VIDMEM);
+ } else {
+		/* NV04 cmdbuf hack, from the original ddx.. not sure of its
+		 * exact reason for existing :) PCI access to cmdbuf in
+ * VRAM.
+ */
+ cb_obj = nouveau_dma_object_create(dev,
+ dev_priv->cmdbuf_alloc->start,
+ dev_priv->cmdbuf_alloc->size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_PCI);
+ }
+ if (!cb_obj) {
+ DRM_ERROR("unable to alloc object for command buffer\n");
+ return DRM_ERR(EINVAL);
+ }
+ dev_priv->fifos[i].cmdbuf_obj = cb_obj;
+
+ /* that fifo is used */
+ dev_priv->fifos[i].used=1;
+ dev_priv->fifos[i].filp=filp;
+
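+	/* tell userspace which channel it got and where its slice of the
+	 * command buffer starts */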
+ init->channel = i;
+ init->put_base = i*dev_priv->cmdbuf_ch_size;
+ dev_priv->cur_fifo = init->channel;
+
+ nouveau_wait_for_idle(dev);
+
+ /* disable the fifo caches */
+ NV_WRITE(NV_PFIFO_CACHES, 0x00000000);
+ NV_WRITE(NV_PFIFO_CACH1_DMAPSH, NV_READ(NV_PFIFO_CACH1_DMAPSH)&(~0x1));
+ NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000000);
+ NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000);
+
+	/* Save the current channel's state to its RAMFC entry.
+	 *
+	 * Then construct the initial RAMFC for the new channel. I'm not entirely
+	 * sure this is needed if we activate the channel immediately.
+	 * My understanding is that the GPU will fill RAMFC itself when
+	 * it switches away from the channel.
+	 */
+ if (dev_priv->card_type < NV_10) {
+ nouveau_context_init(dev, init);
+ } else if (dev_priv->card_type < NV_40) {
+ nouveau_nv10_context_save(dev);
+ nouveau_nv10_context_init(dev, init);
+ } else {
+ nouveau_nv40_context_save(dev);
+ nouveau_nv40_context_init(dev, init);
+ }
+
+ /* enable the fifo dma operation */
+ NV_WRITE(NV_PFIFO_MODE,NV_READ(NV_PFIFO_MODE)|(1<<init->channel));
+
+ NV_WRITE(NV03_FIFO_REGS_DMAPUT(init->channel), init->put_base);
+ NV_WRITE(NV03_FIFO_REGS_DMAGET(init->channel), init->put_base);
+
+ // FIXME check if we need to refill the time quota with something like NV_WRITE(0x204C, 0x0003FFFF);
+
+ if (dev_priv->card_type >= NV_40)
+ NV_WRITE(NV_PFIFO_CACH1_PSH1, 0x00010000|dev_priv->cur_fifo);
+ else
+ NV_WRITE(NV_PFIFO_CACH1_PSH1, 0x00000100|dev_priv->cur_fifo);
+
+ NV_WRITE(NV_PFIFO_CACH1_DMAP, init->put_base);
+ NV_WRITE(NV_PFIFO_CACH1_DMAG, init->put_base);
+ NV_WRITE(NV_PFIFO_CACH1_DMAI,
+ nouveau_chip_instance_get(dev, cb_obj->instance));
+ NV_WRITE(NV_PFIFO_SIZE , 0x0000FFFF);
+ NV_WRITE(NV_PFIFO_CACH1_HASH, 0x0000FFFF);
+
+ NV_WRITE(NV_PFIFO_CACH0_PUL1, 0x00000001);
+ NV_WRITE(NV_PFIFO_CACH1_DMAC, 0x00000000);
+ NV_WRITE(NV_PFIFO_CACH1_DMAS, 0x00000000);
+ NV_WRITE(NV_PFIFO_CACH1_ENG, 0x00000000);
+#ifdef __BIG_ENDIAN
+ NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4|NV_PFIFO_CACH1_BIG_ENDIAN);
+#else
+ NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4);
+#endif
+ NV_WRITE(NV_PFIFO_CACH1_DMAPSH, 0x00000001);
+ NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000001);
+ NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001);
+ NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001);
+
+ /* reenable the fifo caches */
+ NV_WRITE(NV_PFIFO_CACHES, 0x00000001);
+
+ /* make the fifo available to user space */
+ /* first, the fifo control regs */
+ init->ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init->channel);
+ init->ctrl_size = NV03_FIFO_REGS_SIZE;
+ ret = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,
+ 0, &dev_priv->fifos[init->channel].regs);
+ if (ret != 0)
+ return ret;
+
+ /* then, the fifo itself */
+ init->cmdbuf = dev_priv->cmdbuf_alloc->start;
+ init->cmdbuf += init->channel * dev_priv->cmdbuf_ch_size;
+ init->cmdbuf_size = dev_priv->cmdbuf_ch_size;
+ ret = drm_addmap(dev, init->cmdbuf, init->cmdbuf_size, _DRM_REGISTERS,
+ 0, &dev_priv->fifos[init->channel].map);
+ if (ret != 0)
+ return ret;
+
+ /* FIFO has no objects yet */
+ dev_priv->fifos[init->channel].objs = NULL;
+ dev_priv->fifo_alloc_count++;
+
+ DRM_INFO("%s: initialised FIFO %d\n", __func__, init->channel);
+ return 0;
+}
+
+/* stops a fifo */
+void nouveau_fifo_free(drm_device_t* dev,int n)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ int i;
+ int ctx_size = nouveau_fifo_ctx_size(dev);
+
+ dev_priv->fifos[n].used=0;
+ DRM_INFO("%s: freeing fifo %d\n", __func__, n);
+
+ /* disable the fifo caches */
+ NV_WRITE(NV_PFIFO_CACHES, 0x00000000);
+
+ NV_WRITE(NV_PFIFO_MODE,NV_READ(NV_PFIFO_MODE)&~(1<<n));
+ // FIXME XXX needs more code
+
+ /* Clean RAMFC */
+ for (i=0;i<ctx_size;i+=4) {
+ DRM_DEBUG("RAMFC +%02x: 0x%08x\n", i, NV_READ(NV_RAMIN +
+ dev_priv->ramfc_offset + n*ctx_size + i));
+ NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset + n*ctx_size + i, 0);
+ }
+
+ /* reenable the fifo caches */
+ NV_WRITE(NV_PFIFO_CACHES, 0x00000001);
+
+ dev_priv->fifo_alloc_count--;
+}
+
+/* cleanups all the fifos from filp */
+void nouveau_fifo_cleanup(drm_device_t* dev, DRMFILE filp)
+{
+ int i;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("clearing FIFO enables from filp\n");
+ for(i=0;i<nouveau_fifo_number(dev);i++)
+ if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp==filp)
+ nouveau_fifo_free(dev,i);
+
+ /* check we still point at an active channel */
+ if (dev_priv->fifos[dev_priv->cur_fifo].used == 0) {
+ DRM_DEBUG("%s: cur_fifo is no longer owned.\n", __func__);
+ for (i=0;i<nouveau_fifo_number(dev);i++)
+ if (dev_priv->fifos[i].used) break;
+ if (i==nouveau_fifo_number(dev))
+ i=0;
+ DRM_DEBUG("%s: new cur_fifo is %d\n", __func__, i);
+ dev_priv->cur_fifo = i;
+ }
+
+/* if (dev_priv->cmdbuf_alloc)
+ nouveau_fifo_init(dev);*/
+}
+
+int nouveau_fifo_id_get(drm_device_t* dev, DRMFILE filp)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ int i;
+
+ for(i=0;i<nouveau_fifo_number(dev);i++)
+ if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp == filp)
+ return i;
+ return -1;
+}
+
+/***********************************
+ * ioctls wrapping the functions
+ ***********************************/
+
+static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_nouveau_fifo_alloc_t init;
+ int res;
+ DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data, sizeof(init));
+
+ res=nouveau_fifo_alloc(dev,&init,filp);
+ if (!res)
+ DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data, init, sizeof(init));
+
+ return res;
+}
+
+/***********************************
+ * finally, the ioctl table
+ ***********************************/
+
+drm_ioctl_desc_t nouveau_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_NOUVEAU_OBJECT_INIT)] = {nouveau_ioctl_object_init, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_NOUVEAU_DMA_OBJECT_INIT)] = {nouveau_ioctl_dma_object_init, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_ALLOC)] = {nouveau_ioctl_mem_alloc, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_FREE)] = {nouveau_ioctl_mem_free, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_NOUVEAU_GETPARAM)] = {nouveau_ioctl_getparam, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_NOUVEAU_SETPARAM)] = {nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+};
+
+int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
+
+
diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c
new file mode 100644
index 00000000..160016ea
--- /dev/null
+++ b/shared-core/nouveau_irq.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_reg.h"
+
+void nouveau_irq_preinstall(drm_device_t *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("IRQ: preinst\n");
+
+ /* Disable/Clear PFIFO interrupts */
+ NV_WRITE(NV_PFIFO_INTEN, 0);
+ NV_WRITE(NV_PFIFO_INTSTAT, 0xFFFFFFFF);
+ /* Disable/Clear PGRAPH interrupts */
+ if (dev_priv->card_type<NV_40)
+ NV_WRITE(NV04_PGRAPH_INTEN, 0);
+ else
+ NV_WRITE(NV40_PGRAPH_INTEN, 0);
+ NV_WRITE(NV_PGRAPH_INTSTAT, 0xFFFFFFFF);
+#if 0
+ /* Disable/Clear CRTC0/1 interrupts */
+ NV_WRITE(NV_CRTC0_INTEN, 0);
+ NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+ NV_WRITE(NV_CRTC1_INTEN, 0);
+ NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+#endif
+ /* Master disable */
+ NV_WRITE(NV_PMC_INTEN, 0);
+}
+
+void nouveau_irq_postinstall(drm_device_t *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("IRQ: postinst\n");
+
+ /* Enable PFIFO error reporting */
+ NV_WRITE(NV_PFIFO_INTEN ,
+ NV_PFIFO_INTR_CACHE_ERROR |
+ NV_PFIFO_INTR_RUNOUT |
+ NV_PFIFO_INTR_RUNOUT_OVERFLOW |
+ NV_PFIFO_INTR_DMA_PUSHER |
+ NV_PFIFO_INTR_DMA_PT |
+ NV_PFIFO_INTR_SEMAPHORE |
+ NV_PFIFO_INTR_ACQUIRE_TIMEOUT
+ );
+ NV_WRITE(NV_PFIFO_INTSTAT, 0xFFFFFFFF);
+
+ /* Enable PGRAPH interrupts */
+ if (dev_priv->card_type<NV_40)
+ NV_WRITE(NV04_PGRAPH_INTEN,
+ NV_PGRAPH_INTR_NOTIFY |
+ NV_PGRAPH_INTR_MISSING_HW |
+ NV_PGRAPH_INTR_CONTEXT_SWITCH |
+ NV_PGRAPH_INTR_BUFFER_NOTIFY |
+ NV_PGRAPH_INTR_ERROR
+ );
+ else
+ NV_WRITE(NV40_PGRAPH_INTEN,
+ NV_PGRAPH_INTR_NOTIFY |
+ NV_PGRAPH_INTR_MISSING_HW |
+ NV_PGRAPH_INTR_CONTEXT_SWITCH |
+ NV_PGRAPH_INTR_BUFFER_NOTIFY |
+ NV_PGRAPH_INTR_ERROR
+ );
+ NV_WRITE(NV_PGRAPH_INTSTAT, 0xFFFFFFFF);
+
+#if 0
+ /* Enable CRTC0/1 interrupts */
+ NV_WRITE(NV_CRTC0_INTEN, NV_CRTC_INTR_VBLANK);
+ NV_WRITE(NV_CRTC1_INTEN, NV_CRTC_INTR_VBLANK);
+#endif
+
+ /* Master enable */
+ NV_WRITE(NV_PMC_INTEN, NV_PMC_INTEN_MASTER_ENABLE);
+}
+
+void nouveau_irq_uninstall(drm_device_t *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("IRQ: uninst\n");
+
+ /* Disable PFIFO interrupts */
+ NV_WRITE(NV_PFIFO_INTEN, 0);
+ /* Disable PGRAPH interrupts */
+ if (dev_priv->card_type<NV_40)
+ NV_WRITE(NV04_PGRAPH_INTEN, 0);
+ else
+ NV_WRITE(NV40_PGRAPH_INTEN, 0);
+#if 0
+ /* Disable CRTC0/1 interrupts */
+ NV_WRITE(NV_CRTC0_INTEN, 0);
+ NV_WRITE(NV_CRTC1_INTEN, 0);
+#endif
+ /* Master disable */
+ NV_WRITE(NV_PMC_INTEN, 0);
+}
+
+static void nouveau_fifo_irq_handler(drm_device_t *dev)
+{
+ uint32_t status, chmode, chstat, channel;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ status = NV_READ(NV_PFIFO_INTSTAT);
+ if (!status)
+ return;
+ chmode = NV_READ(NV_PFIFO_MODE);
+ chstat = NV_READ(NV_PFIFO_DMA);
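+	/* the low bits of CACHE1 PUSH1 identify the channel currently resident in PFIFO */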
+ channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1);
+
+ DRM_DEBUG("NV: PFIFO interrupt! Channel=%d, INTSTAT=0x%08x/MODE=0x%08x/PEND=0x%08x\n", channel, status, chmode, chstat);
+
+ if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+ uint32_t c1get, c1method, c1data;
+
+ DRM_ERROR("NV: PFIFO error interrupt\n");
+
+ c1get = NV_READ(NV_PFIFO_CACH1_GET) >> 2;
+ if (dev_priv->card_type < NV_40) {
+ /* Untested, so it may not work.. */
+ c1method = NV_READ(NV_PFIFO_CACH1_METHOD(c1get));
+ c1data = NV_READ(NV_PFIFO_CACH1_DATA(c1get));
+ } else {
+ c1method = NV_READ(NV40_PFIFO_CACH1_METHOD(c1get));
+ c1data = NV_READ(NV40_PFIFO_CACH1_DATA(c1get));
+ }
+
+ DRM_ERROR("NV: Channel %d/%d - Method 0x%04x, Data 0x%08x\n",
+ channel, (c1method >> 13) & 7,
+ c1method & 0x1ffc, c1data
+ );
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ NV_WRITE(NV_PFIFO_INTSTAT, NV_PFIFO_INTR_CACHE_ERROR);
+ }
+
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ DRM_INFO("NV: PFIFO DMA pusher interrupt\n");
+
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ NV_WRITE(NV_PFIFO_INTSTAT, NV_PFIFO_INTR_DMA_PUSHER);
+
+ NV_WRITE(NV_PFIFO_CACH1_DMAS, 0x00000000);
+ if (NV_READ(NV_PFIFO_CACH1_DMAP)!=NV_READ(NV_PFIFO_CACH1_DMAG))
+ {
+ uint32_t getval=NV_READ(NV_PFIFO_CACH1_DMAG)+4;
+ NV_WRITE(NV_PFIFO_CACH1_DMAG,getval);
+ }
+ }
+
+ if (status) {
+ DRM_INFO("NV: unknown PFIFO interrupt. status=0x%08x\n", status);
+
+ NV_WRITE(NV_PFIFO_INTSTAT, status);
+ }
+
+ NV_WRITE(NV_PMC_INTSTAT, NV_PMC_INTSTAT_PFIFO_PENDING);
+}
+
+static void nouveau_nv04_context_switch(drm_device_t *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ uint32_t channel,i;
+ uint32_t max=0;
+ NV_WRITE(NV_PGRAPH_FIFO,0x0);
+ channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1);
+ //DRM_INFO("raw PFIFO_CACH1_PHS1 reg is %x\n",NV_READ(NV_PFIFO_CACH1_PSH1));
+ //DRM_INFO("currently on channel %d\n",channel);
+ for (i=0;i<nouveau_fifo_number(dev);i++)
+ if ((dev_priv->fifos[i].used)&&(i!=channel)) {
+ uint32_t put,get,pending;
+ //put=NV_READ(dev_priv->ramfc_offset+i*32);
+ //get=NV_READ(dev_priv->ramfc_offset+4+i*32);
+ put=NV_READ(NV03_FIFO_REGS_DMAPUT(i));
+ get=NV_READ(NV03_FIFO_REGS_DMAGET(i));
+ pending=NV_READ(NV_PFIFO_DMA);
+ //DRM_INFO("Channel %d (put/get %x/%x)\n",i,put,get);
+ /* mark all pending channels as such */
+ if ((put!=get)&!(pending&(1<<i)))
+ {
+ pending|=(1<<i);
+ NV_WRITE(NV_PFIFO_DMA,pending);
+ }
+ max++;
+ }
+ nouveau_wait_for_idle(dev);
+
+#if 1
+ /* 2-channel commute */
+ // NV_WRITE(NV_PFIFO_CACH1_PSH1,channel|0x100);
+ if (channel==0)
+ channel=1;
+ else
+ channel=0;
+ // dev_priv->cur_fifo=channel;
+ NV_WRITE(0x2050,channel|0x100);
+#endif
+ //NV_WRITE(NV_PFIFO_CACH1_PSH1,max|0x100);
+ //NV_WRITE(0x2050,max|0x100);
+
+ NV_WRITE(NV_PGRAPH_FIFO,0x1);
+
+}
+
+static void nouveau_nv10_context_switch(drm_device_t *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ int channel;
+
+ channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1);
+ /* 2-channel commute */
+// if (channel==0)
+// channel=1;
+// else
+// channel=0;
+// dev_priv->cur_fifo=channel;
+
+// NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10000100);
+ NV_WRITE(NV_PGRAPH_CTX_USER, NV_READ(NV_PGRAPH_CTX_USER)|0x1F000000);
+// NV_WRITE(NV_PGRAPH_FFINTFC_ST2, NV_READ(NV_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
+ /* touch PGRAPH_CTX_SWITCH* here ? */
+ NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10000100);
+}
+
+static void nouveau_pgraph_irq_handler(drm_device_t *dev)
+{
+ uint32_t status;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ status = NV_READ(NV_PGRAPH_INTSTAT);
+ if (!status)
+ return;
+
+ if (status & NV_PGRAPH_INTR_NOTIFY) {
+ uint32_t nsource, nstatus, instance, notify;
+ DRM_DEBUG("NV: PGRAPH notify interrupt\n");
+
+ nstatus = NV_READ(0x00400104);
+ nsource = NV_READ(0x00400108);
+ DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus);
+
+ instance = NV_READ(0x00400158);
+ notify = NV_READ(0x00400150) >> 16;
+		DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", instance, notify);
+
+ status &= ~NV_PGRAPH_INTR_NOTIFY;
+ NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_NOTIFY);
+ }
+
+ if (status & NV_PGRAPH_INTR_BUFFER_NOTIFY) {
+ uint32_t nsource, nstatus, instance, notify;
+ DRM_DEBUG("NV: PGRAPH buffer notify interrupt\n");
+
+ nstatus = NV_READ(0x00400104);
+ nsource = NV_READ(0x00400108);
+ DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus);
+
+ instance = NV_READ(0x00400158);
+ notify = NV_READ(0x00400150) >> 16;
+ DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", instance, notify);
+
+ status &= ~NV_PGRAPH_INTR_BUFFER_NOTIFY;
+ NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_BUFFER_NOTIFY);
+ }
+
+ if (status & NV_PGRAPH_INTR_MISSING_HW) {
+ DRM_ERROR("NV: PGRAPH missing hw interrupt\n");
+
+ status &= ~NV_PGRAPH_INTR_MISSING_HW;
+ NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_MISSING_HW);
+ }
+
+ if (status & NV_PGRAPH_INTR_ERROR) {
+ uint32_t nsource, nstatus, instance;
+ uint32_t address;
+ uint32_t channel;
+ uint32_t method, subc, data;
+
+ DRM_ERROR("NV: PGRAPH error interrupt\n");
+
+ nstatus = NV_READ(0x00400104);
+ nsource = NV_READ(0x00400108);
+ DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus);
+
+ instance = NV_READ(0x00400158);
+ DRM_DEBUG("instance:0x%08x\n", instance);
+
+ address = NV_READ(0x400704);
+ data = NV_READ(0x400708);
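+		/* the trapped address encodes channel, subchannel and method offset */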
+ channel = (address >> 20) & 0x1F;
+ subc = (address >> 16) & 0x7;
+ method = address & 0x1FFC;
+ DRM_DEBUG("NV: 0x400704 = 0x%08x\n", address);
+ DRM_ERROR("NV: Channel %d/%d (class 0x%04x) -"
+ "Method 0x%04x, Data 0x%08x\n",
+ channel, subc,
+ NV_READ(0x400160+subc*4) & 0xFFFF,
+ method, data
+ );
+
+ status &= ~NV_PGRAPH_INTR_ERROR;
+ NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_ERROR);
+ }
+
+ if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ uint32_t channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1);
+ DRM_INFO("NV: PGRAPH context switch interrupt channel %x\n",channel);
+ switch(dev_priv->card_type)
+ {
+ case NV_04:
+ case NV_05:
+ nouveau_nv04_context_switch(dev);
+ break;
+ case NV_10:
+ nouveau_nv10_context_switch(dev);
+ break;
+ default:
+ DRM_INFO("NV: Context switch not implemented\n");
+ break;
+ }
+
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ }
+
+ if (status) {
+ DRM_INFO("NV: Unknown PGRAPH interrupt! STAT=0x%08x\n", status);
+ NV_WRITE(NV_PGRAPH_INTSTAT, status);
+ }
+
+ NV_WRITE(NV_PMC_INTSTAT, NV_PMC_INTSTAT_PGRAPH_PENDING);
+}
+
+static void nouveau_crtc_irq_handler(drm_device_t *dev, int crtc)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ if (crtc&1) {
+ NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+ }
+
+ if (crtc&2) {
+ NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+ }
+}
+
+irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS)
+{
+ drm_device_t *dev = (drm_device_t*)arg;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ uint32_t status;
+
+ status = NV_READ(NV_PMC_INTSTAT);
+ if (!status)
+ return IRQ_NONE;
+
+ DRM_DEBUG("PMC INTSTAT: 0x%08x\n", status);
+
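+	/* hand each pending engine off to its handler and clear it from the local status */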
+ if (status & NV_PMC_INTSTAT_PFIFO_PENDING) {
+ nouveau_fifo_irq_handler(dev);
+ status &= ~NV_PMC_INTSTAT_PFIFO_PENDING;
+ }
+ if (status & NV_PMC_INTSTAT_PGRAPH_PENDING) {
+ nouveau_pgraph_irq_handler(dev);
+ status &= ~NV_PMC_INTSTAT_PGRAPH_PENDING;
+ }
+ if (status & NV_PMC_INTSTAT_CRTCn_PENDING) {
+ nouveau_crtc_irq_handler(dev, (status>>24)&3);
+ status &= ~NV_PMC_INTSTAT_CRTCn_PENDING;
+ }
+
+ if (status)
+ DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status);
+
+ return IRQ_HANDLED;
+}
+
diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c
new file mode 100644
index 00000000..82221c8f
--- /dev/null
+++ b/shared-core/nouveau_mem.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ * Copyright 2005 Stephane Marchesin
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "nouveau_drv.h"
+
+static int meminit_ok=0;
+
+static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size,
+ DRMFILE filp)
+{
+ /* Maybe cut off the start of an existing block */
+ if (start > p->start) {
+ struct mem_block *newblock =
+ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+ if (!newblock)
+ goto out;
+ newblock->start = start;
+ newblock->size = p->size - (start - p->start);
+ newblock->filp = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* Maybe cut off the end of an existing block */
+ if (size < p->size) {
+ struct mem_block *newblock =
+ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+ if (!newblock)
+ goto out;
+ newblock->start = start + size;
+ newblock->size = p->size - size;
+ newblock->filp = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size = size;
+ }
+
+out:
+ /* Our block is in the middle */
+ p->filp = filp;
+ return p;
+}
+
+static struct mem_block *alloc_block(struct mem_block *heap, uint64_t size,
+ int align2, DRMFILE filp)
+{
+ struct mem_block *p;
+ uint64_t mask = (1ULL << align2) - 1;
+
+ if (!heap)
+ return NULL;
+
+ list_for_each(p, heap) {
+ uint64_t start = (p->start + mask) & ~mask;
+ if (p->filp == 0 && start + size <= p->start + p->size)
+ return split_block(p, start, size, filp);
+ }
+
+ return NULL;
+}
+
+static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
+{
+ struct mem_block *p;
+
+ list_for_each(p, heap)
+ if (p->start == start)
+ return p;
+
+ return NULL;
+}
+
+static void free_block(struct mem_block *p)
+{
+ p->filp = NULL;
+
+ /* Assumes a single contiguous range. Needs a special filp in
+ * 'heap' to stop it being subsumed.
+ */
+ if (p->next->filp == 0) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ drm_free(q, sizeof(*q), DRM_MEM_BUFS);
+ }
+
+ if (p->prev->filp == 0) {
+ struct mem_block *q = p->prev;
+ q->size += p->size;
+ q->next = p->next;
+ q->next->prev = q;
+ drm_free(p, sizeof(*p), DRM_MEM_BUFS);
+ }
+}
+
+/* Initialize. How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, uint64_t start, uint64_t size)
+{
+ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
+
+ if (!blocks)
+ return DRM_ERR(ENOMEM);
+
+ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
+ if (!*heap) {
+ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
+ return DRM_ERR(ENOMEM);
+ }
+
+ blocks->start = start;
+ blocks->size = size;
+ blocks->filp = NULL;
+ blocks->next = blocks->prev = *heap;
+
+ memset(*heap, 0, sizeof(**heap));
+ (*heap)->filp = (DRMFILE) - 1;
+ (*heap)->next = (*heap)->prev = blocks;
+ return 0;
+}
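+
+/* Example (with hypothetical values): after
+ *     init_heap(&heap, 0x10000, 0x100000);
+ * the heap is a circular doubly-linked list of two nodes: the sentinel
+ * (*heap), marked in-use with filp == (DRMFILE)-1 so it is never merged,
+ * and one free block (filp == NULL) covering [0x10000, 0x110000).
+ * alloc_block() walks this ring looking for a free node that is large
+ * enough, and split_block() carves the requested range out of it.
+ */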
+
+/*
+ * Free all blocks associated with the releasing filp
+ */
+void nouveau_mem_release(DRMFILE filp, struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap || !heap->next)
+ return;
+
+ list_for_each(p, heap) {
+ if (p->filp == filp)
+ p->filp = NULL;
+ }
+
+ /* Assumes a single contiguous range. Needs a special filp in
+ * 'heap' to stop it being subsumed.
+ */
+ list_for_each(p, heap) {
+ while ((p->filp == 0) && (p->next->filp == 0) && (p->next!=heap)) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ drm_free(q, sizeof(*q), DRM_MEM_BUFS);
+ }
+ }
+}
+
+/*
+ * Cleanup everything
+ */
+static void nouveau_mem_takedown(struct mem_block **heap)
+{
+ struct mem_block *p;
+
+ if (!*heap)
+ return;
+
+ for (p = (*heap)->next; p != *heap;) {
+ struct mem_block *q = p;
+ p = p->next;
+ drm_free(q, sizeof(*q), DRM_MEM_BUFS);
+ }
+
+ drm_free(*heap, sizeof(**heap), DRM_MEM_BUFS);
+ *heap = NULL;
+}
+
+void nouveau_mem_close(struct drm_device *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ nouveau_mem_takedown(&dev_priv->agp_heap);
+ nouveau_mem_takedown(&dev_priv->fb_heap);
+}
+
+/* returns the amount of FB ram in bytes */
+uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ switch(dev_priv->card_type)
+ {
+ case NV_03:
+ switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
+ {
+ case NV03_BOOT_0_RAM_AMOUNT_8MB:
+ case NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM:
+ return 8*1024*1024;
+ case NV03_BOOT_0_RAM_AMOUNT_4MB:
+ return 4*1024*1024;
+ case NV03_BOOT_0_RAM_AMOUNT_2MB:
+ return 2*1024*1024;
+ }
+ break;
+ case NV_04:
+ case NV_05:
+ if (NV_READ(NV03_BOOT_0) & 0x00000100) {
+ return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024;
+ } else
+ switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
+ {
+ case NV04_BOOT_0_RAM_AMOUNT_32MB:
+ return 32*1024*1024;
+ case NV04_BOOT_0_RAM_AMOUNT_16MB:
+ return 16*1024*1024;
+ case NV04_BOOT_0_RAM_AMOUNT_8MB:
+ return 8*1024*1024;
+ case NV04_BOOT_0_RAM_AMOUNT_4MB:
+ return 4*1024*1024;
+ }
+ break;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ case NV_44:
+ case NV_50:
+ default:
+ // XXX won't work on BSD because of pci_read_config_dword
+ if (dev_priv->flags&NV_NFORCE) {
+ uint32_t mem;
+ pci_read_config_dword(dev->pdev, 0x7C, &mem);
+ return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
+ } else if(dev_priv->flags&NV_NFORCE2) {
+ uint32_t mem;
+ pci_read_config_dword(dev->pdev, 0x84, &mem);
+ return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
+ } else {
+ uint64_t mem;
+ mem=(NV_READ(NV04_FIFO_DATA)&NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
+ return mem*1024*1024;
+ }
+ break;
+ }
+
+ DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n");
+ return 0;
+}
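+
+/* Worked example (illustrative values): on an NV10 or later card a read of,
+ * say, 0x10000474 from NV04_FIFO_DATA gives
+ *     (0x10000474 & NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> 20 = 0x100,
+ * i.e. 256MB of video RAM, which the function returns as 256*1024*1024.
+ */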
+
+
+
+int nouveau_mem_init(struct drm_device *dev)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ dev_priv->agp_phys=0;
+ dev_priv->fb_phys=0;
+
+ /* init AGP */
+ dev_priv->agp_heap=NULL;
+ if (drm_device_is_agp(dev))
+ {
+ int err;
+ drm_agp_info_t info;
+ drm_agp_mode_t mode;
+ drm_agp_buffer_t agp_req;
+ drm_agp_binding_t bind_req;
+
+ err = drm_agp_acquire(dev);
+ if (err) {
+ DRM_ERROR("Unable to acquire AGP: %d\n", err);
+ goto no_agp;
+ }
+
+ err = drm_agp_info(dev, &info);
+ if (err) {
+ DRM_ERROR("Unable to get AGP info: %d\n", err);
+ goto no_agp;
+ }
+
+ /* see agp.h for the AGPSTAT_* modes available */
+ mode.mode = info.mode;
+ err = drm_agp_enable(dev, mode);
+ if (err) {
+ DRM_ERROR("Unable to enable AGP: %d\n", err);
+ goto no_agp;
+ }
+
+ agp_req.size = info.aperture_size;
+ agp_req.type = 0;
+ err = drm_agp_alloc(dev, &agp_req);
+ if (err) {
+ DRM_ERROR("Unable to alloc AGP: %d\n", err);
+ goto no_agp;
+ }
+
+ bind_req.handle = agp_req.handle;
+ bind_req.offset = 0;
+ err = drm_agp_bind(dev, &bind_req);
+ if (err) {
+ DRM_ERROR("Unable to bind AGP: %d\n", err);
+ goto no_agp;
+ }
+
+ if (init_heap(&dev_priv->agp_heap, info.aperture_base, info.aperture_size))
+ goto no_agp;
+
+ dev_priv->agp_phys=info.aperture_base;
+ }
+no_agp:
+
+ /* Init FB */
+ dev_priv->fb_phys=drm_get_resource_start(dev,1);
+ if (nouveau_mem_fb_amount(dev)>256*1024*1024) {
+ /* On cards with more than 256MB of VRAM we can't map everything,
+ * so create a second FB heap for the memory beyond the mappable 256MB */
+ if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), 256*1024*1024))
+ return DRM_ERR(ENOMEM);
+ if (init_heap(&dev_priv->fb_nomap_heap, drm_get_resource_start(dev,1)+256*1024*1024, nouveau_mem_fb_amount(dev)-256*1024*1024))
+ return DRM_ERR(ENOMEM);
+ } else {
+ if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), nouveau_mem_fb_amount(dev)))
+ return DRM_ERR(ENOMEM);
+ dev_priv->fb_nomap_heap=NULL;
+ }
+
+ return 0;
+}
+
+struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp)
+{
+ struct mem_block *block;
+ int type;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ /*
+ * Init memory if needed
+ */
+ if (meminit_ok==0)
+ {
+ nouveau_mem_init(dev);
+ meminit_ok=1;
+ }
+
+ /*
+ * Make things easier on ourselves: all allocations are page-aligned.
+ * We need that to map allocated regions into user space
+ */
+ if (alignment < PAGE_SHIFT)
+ alignment = PAGE_SHIFT;
+
+ /*
+ * Warn about zero-sized allocations, but let them go through
+ */
+ if (size == 0)
+ DRM_INFO("warning: zero-byte allocation\n");
+
+ /*
+ * Keep alloc size a multiple of the page size to keep drm_addmap() happy
+ */
+ if (size & (~PAGE_MASK))
+ size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;
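+ /* e.g. (illustrative) a 0x1234-byte request on a 4KiB-page system is
+ * rounded up to 0x2000 bytes here */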
+
+ if (flags&NOUVEAU_MEM_AGP) {
+ type=NOUVEAU_MEM_AGP;
+ block = alloc_block(dev_priv->agp_heap, size, alignment, filp);
+ if (block) goto alloc_ok;
+ }
+ if (flags&(NOUVEAU_MEM_FB|NOUVEAU_MEM_FB_ACCEPTABLE)) {
+ type=NOUVEAU_MEM_FB;
+ if (!(flags&NOUVEAU_MEM_MAPPED)) {
+ block = alloc_block(dev_priv->fb_nomap_heap, size, alignment, filp);
+ if (block) goto alloc_ok;
+ }
+ block = alloc_block(dev_priv->fb_heap, size, alignment, filp);
+ if (block) goto alloc_ok;
+ }
+ if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) {
+ type=NOUVEAU_MEM_AGP;
+ block = alloc_block(dev_priv->agp_heap, size, alignment, filp);
+ if (block) goto alloc_ok;
+ }
+
+ return NULL;
+
+alloc_ok:
+ block->flags=type;
+
+ if (flags&NOUVEAU_MEM_MAPPED)
+ {
+ int ret;
+ block->flags|=NOUVEAU_MEM_MAPPED;
+
+ if (type == NOUVEAU_MEM_AGP)
+ ret = drm_addmap(dev, block->start - dev->agp->base, block->size,
+ _DRM_AGP, 0, &block->map);
+ else
+ ret = drm_addmap(dev, block->start, block->size,
+ _DRM_FRAME_BUFFER, 0, &block->map);
+ if (ret) {
+ free_block(block);
+ return NULL;
+ }
+ }
+
+ DRM_INFO("allocated 0x%llx\n", block->start);
+ return block;
+}
+
+void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
+{
+ DRM_INFO("freeing 0x%llx\n", block->start);
+ if (meminit_ok==0)
+ {
+ DRM_ERROR("%s called without init\n", __FUNCTION__);
+ return;
+ }
+ if (block->flags&NOUVEAU_MEM_MAPPED)
+ drm_rmmap(dev, block->map);
+ free_block(block);
+}
+
+int nouveau_instmem_init(struct drm_device *dev, uint32_t offset,
+ uint32_t size)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = init_heap(&dev_priv->ramin_heap, offset, size);
+ if (ret) {
+ dev_priv->ramin_heap = NULL;
+ DRM_ERROR("Failed to init RAMIN heap\n");
+ }
+
+ return ret;
+}
+
+struct mem_block *nouveau_instmem_alloc(struct drm_device *dev,
+ uint32_t size, uint32_t align)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct mem_block *block;
+
+ if (!dev_priv->ramin_heap) {
+ DRM_ERROR("instmem alloc called without init\n");
+ return NULL;
+ }
+
+ block = alloc_block(dev_priv->ramin_heap, size, align, (DRMFILE)-2);
+ if (block) {
+ block->flags = NOUVEAU_MEM_INSTANCE;
+ DRM_DEBUG("instance(size=%d, align=%d) alloc'd at 0x%08x\n",
+ size, (1<<align), (uint32_t)block->start);
+ }
+
+ return block;
+}
+
+void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block)
+{
+ if (dev && block) {
+ free_block(block);
+ }
+}
+
+/*
+ * Ioctls
+ */
+
+int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ drm_nouveau_mem_alloc_t alloc;
+ struct mem_block *block;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(alloc, (drm_nouveau_mem_alloc_t __user *) data,
+ sizeof(alloc));
+
+ block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, filp);
+ if (!block)
+ return DRM_ERR(ENOMEM);
+ alloc.region_offset=block->start;
+
+ DRM_COPY_TO_USER_IOCTL((drm_nouveau_mem_alloc_t __user *) data, alloc, sizeof(alloc));
+
+ return 0;
+}
+
+int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ drm_nouveau_mem_free_t memfree;
+ struct mem_block *block;
+
+ if (!dev_priv) {
+ DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_FROM_USER_IOCTL(memfree, (drm_nouveau_mem_free_t __user *) data,
+ sizeof(memfree));
+
+ block=NULL;
+ if (memfree.flags&NOUVEAU_MEM_FB)
+ block = find_block(dev_priv->fb_heap, memfree.region_offset);
+ else if (memfree.flags&NOUVEAU_MEM_AGP)
+ block = find_block(dev_priv->agp_heap, memfree.region_offset);
+ if (!block)
+ return DRM_ERR(EFAULT);
+ if (block->filp != filp)
+ return DRM_ERR(EPERM);
+
+ nouveau_mem_free(dev, block);
+ return 0;
+}
+
+
diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c
new file mode 100644
index 00000000..c11b05eb
--- /dev/null
+++ b/shared-core/nouveau_object.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/* TODO
+ * - Check object class, deny unsafe objects (add card-specific versioning?)
+ * - Get rid of DMA object creation, this should be wrapped by MM routines.
+ */
+
+/* Translate a RAMIN offset into a value the card understands; this will be
+ * useful in the future when we can access more instance RAM, which isn't
+ * mapped into the PRAMIN aperture
+ */
+uint32_t nouveau_chip_instance_get(drm_device_t *dev,
+ struct mem_block *mem)
+{
+ uint32_t inst = (uint32_t)mem->start >> 4;
+ DRM_DEBUG("****** on-chip instance for 0x%016llx = 0x%08x\n",
+ mem->start, inst);
+ return inst;
+}
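+
+/* e.g. (illustrative) a block at RAMIN offset 0x1200 yields the on-chip
+ * instance value 0x120 */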
+
+static void nouveau_object_link(drm_device_t *dev, int fifo_num,
+ struct nouveau_object *obj)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num];
+
+ if (!fifo->objs) {
+ fifo->objs = obj;
+ return;
+ }
+
+ obj->prev = NULL;
+ obj->next = fifo->objs;
+
+ fifo->objs->prev = obj;
+ fifo->objs = obj;
+}
+
+static void nouveau_object_unlink(drm_device_t *dev, int fifo_num,
+ struct nouveau_object *obj)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num];
+
+ if (obj->prev == NULL) {
+ if (obj->next)
+ obj->next->prev = NULL;
+ fifo->objs = obj->next;
+ } else if (obj->next == NULL) {
+ if (obj->prev)
+ obj->prev->next = NULL;
+ } else {
+ obj->prev->next = obj->next;
+ obj->next->prev = obj->prev;
+ }
+}
+
+static struct nouveau_object *
+nouveau_object_handle_find(drm_device_t *dev, int fifo_num, uint32_t handle)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num];
+ struct nouveau_object *obj = fifo->objs;
+
+ if (!handle)
+ return NULL;
+
+ DRM_DEBUG("Looking for handle 0x%08x\n", handle);
+ while (obj) {
+ if (obj->handle == handle)
+ return obj;
+ obj = obj->next;
+ }
+
+ DRM_DEBUG("...couldn't find handle\n");
+ return NULL;
+}
+
+/* NVidia uses context objects to drive drawing operations.
+
+ Context objects can be selected into 8 subchannels in the FIFO,
+ and then used via DMA command buffers.
+
+ A context object is referenced by a user-defined handle (CARD32). The HW
+ looks up graphics objects in a hash table in the instance RAM.
+
+ An entry in the hash table consists of 2 CARD32s. The first CARD32 contains
+ the handle, the second is a bitfield that contains the address of the
+ object in instance RAM.
+
+ The format of the second CARD32 seems to be:
+
+ NV4 to NV30:
+
+ 15: 0 instance_addr >> 4
+ 17:16 engine (here uses 1 = graphics)
+ 28:24 channel id (here uses 0)
+ 31 valid (use 1)
+
+ NV40:
+
+ 15: 0 instance_addr >> 4 (maybe 19-0)
+ 21:20 engine (here uses 1 = graphics)
+ I'm unsure about the other bits, but using 0 seems to work.
+
+ The key into the hash table depends on the object handle and channel id and
+ is given as:
+*/
+static uint32_t nouveau_handle_hash(drm_device_t* dev, uint32_t handle,
+ int fifo)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ uint32_t hash = 0;
+ int i;
+
+ for (i=32;i>0;i-=dev_priv->ramht_bits) {
+ hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
+ handle >>= dev_priv->ramht_bits;
+ }
+ hash ^= fifo << (dev_priv->ramht_bits - 4);
+ return hash << 3;
+}
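+
+/* Worked example (illustrative, assuming dev_priv->ramht_bits == 9, i.e. a
+ * 4KiB RAMHT with 512 8-byte entries): for handle 0xbeef0201 on channel 1
+ * the loop folds the handle in 9-bit chunks,
+ *     0x001 ^ 0x181 ^ 0x1bb ^ 0x017 = 0x02c,
+ * then XORs in (1 << 5) = 0x020 for the channel, giving 0x00c, and the
+ * returned byte offset into RAMHT is 0x00c << 3 = 0x060.
+ */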
+
+static int nouveau_hash_table_insert(drm_device_t* dev, int fifo,
+ struct nouveau_object *obj)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ int ht_base = NV_RAMIN + dev_priv->ramht_offset;
+ int ht_end = ht_base + dev_priv->ramht_size;
+ int o_ofs, ofs;
+
+ o_ofs = ofs = nouveau_handle_hash(dev, obj->handle, fifo);
+
+ while (NV_READ(ht_base + ofs)) {
+ ofs += 8;
+ if (ofs == dev_priv->ramht_size) ofs = 0;
+ if (ofs == o_ofs) {
+ DRM_ERROR("no free hash table entries\n");
+ return 1;
+ }
+ }
+ ofs += ht_base;
+
+ DRM_DEBUG("Channel %d - Handle 0x%08x at 0x%08x\n",
+ fifo, obj->handle, ofs);
+
+ NV_WRITE(NV_RAMHT_HANDLE_OFFSET + ofs, obj->handle);
+ if (dev_priv->card_type >= NV_40)
+ NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs,
+ (fifo << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT) |
+ nouveau_chip_instance_get(dev, obj->instance)
+ );
+ else
+ NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs,
+ NV_RAMHT_CONTEXT_VALID |
+ (fifo << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT) |
+ nouveau_chip_instance_get(dev, obj->instance)
+ );
+
+ obj->ht_loc = ofs;
+ return 0;
+}
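+
+/* Worked example (illustrative): on a pre-NV40 card, inserting an object on
+ * channel 1, engine 1 (graphics), at chip instance 0x120 writes the context
+ * word 0x80000000 | (1 << 24) | (1 << 16) | 0x120 = 0x81010120 next to the
+ * handle in RAMHT.
+ */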
+
+static void nouveau_hash_table_remove(drm_device_t* dev,
+ struct nouveau_object *obj)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("Remove handle 0x%08x at 0x%08x from HT\n",
+ obj->handle, obj->ht_loc);
+ if (obj->ht_loc) {
+ DRM_DEBUG("... HT entry was: 0x%08x/0x%08x\n",
+ NV_READ(obj->ht_loc), NV_READ(obj->ht_loc+4));
+ NV_WRITE(obj->ht_loc , 0x00000000);
+ NV_WRITE(obj->ht_loc+4, 0x00000000);
+ }
+}
+
+static struct nouveau_object *nouveau_instance_alloc(drm_device_t* dev)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct nouveau_object *obj;
+
+ /* Create object struct */
+ obj = drm_calloc(1, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
+ if (!obj) {
+ DRM_ERROR("couldn't alloc memory for object\n");
+ return NULL;
+ }
+ obj->instance = nouveau_instmem_alloc(dev,
+ (dev_priv->card_type >= NV_40 ? 32 : 16), 4);
+ if (!obj->instance) {
+ DRM_ERROR("couldn't alloc RAMIN for object\n");
+ drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
+ return NULL;
+ }
+
+ return obj;
+}
+
+static void nouveau_object_instance_free(drm_device_t *dev,
+ struct nouveau_object *obj)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ int count, i;
+
+ if (dev_priv->card_type >= NV_40)
+ count = 8;
+ else
+ count = 4;
+
+ /* Clean RAMIN entry */
+ DRM_DEBUG("Instance entry for 0x%08x "
+ "(engine %d, class 0x%x) before destroy:\n",
+ obj->handle, obj->engine, obj->class);
+ for (i=0;i<count;i++) {
+ DRM_DEBUG(" +0x%02x: 0x%08x\n", (i*4),
+ INSTANCE_RD(obj->instance, i));
+ INSTANCE_WR(obj->instance, i, 0x00000000);
+ }
+
+ /* Free RAMIN */
+ nouveau_instmem_free(dev, obj->instance);
+}
+
+/*
+ DMA objects are used to reference a piece of memory in the
+ framebuffer, PCI or AGP address space. Each object is 16 bytes big
+ and looks as follows:
+
+ entry[0]
+ 11:0 class (seems like I can always use 0 here)
+ 12 page table present?
+ 13 page entry linear?
+ 15:14 access: 0 rw, 1 ro, 2 wo
+ 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
+ 31:20 dma adjust (bits 0-11 of the address)
+ entry[1]
+ dma limit
+ entry[2]
+ 1 0 = read-only, 1 = read/write
+ 31:12 dma frame address (bits 12-31 of the address)
+
+ Non-linear page tables seem to need a list of frame addresses afterwards;
+ the rivatv project has some info on this.
+
+ The method below creates a DMA object in instance RAM and returns a handle
+ to it that can be used to set up context objects.
+*/
+struct nouveau_object *nouveau_dma_object_create(drm_device_t* dev,
+ uint32_t offset, uint32_t size,
+ int access, uint32_t target)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct nouveau_object *obj;
+ uint32_t frame, adjust;
+
+ DRM_DEBUG("offset:0x%08x, size:0x%08x, target:%d, access:%d\n",
+ offset, size, target, access);
+
+ frame = offset & ~0x00000FFF;
+ adjust = offset & 0x00000FFF;
+
+ obj = nouveau_instance_alloc(dev);
+ if (!obj) {
+ DRM_ERROR("couldn't allocate DMA object\n");
+ return obj;
+ }
+
+ obj->engine = 0;
+ obj->class = 0;
+
+ INSTANCE_WR(obj->instance, 0, ((1<<12) | (1<<13) |
+ (adjust << 20) |
+ (access << 14) |
+ (target << 16) |
+ 0x3D /* DMA_IN_MEMORY */));
+ INSTANCE_WR(obj->instance, 1, size-1);
+ INSTANCE_WR(obj->instance, 2,
+ frame | ((access != NV_DMA_ACCESS_RO) ? (1<<1) : 0));
+ /* I don't actually know what this is; the DMA objects I see
+ * in renouveau dumps usually have this set to the same value as +8
+ */
+ INSTANCE_WR(obj->instance, 3,
+ frame | ((access != NV_DMA_ACCESS_RO) ? (1<<1) : 0));
+
+ return obj;
+}
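+
+/* Worked example (illustrative): wrapping the first 16MiB of VRAM read/write,
+ *     nouveau_dma_object_create(dev, 0, 0x1000000,
+ *                               NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM);
+ * writes entry[0] = 0x0000303d (class 0x3d, present + linear bits, RW, VRAM,
+ * adjust 0), entry[1] = 0x00ffffff (limit = size - 1) and
+ * entry[2] = entry[3] = 0x00000002 (frame 0, read/write bit set).
+ */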
+
+
+/* Context objects in the instance RAM have the following structure.
+ * On NV40 they are 32 bytes long, on NV30 and earlier 16 bytes.
+
+ NV4 - NV30:
+
+ entry[0]
+ 11:0 class
+ 12 chroma key enable
+ 13 user clip enable
+ 14 swizzle enable
+ 17:15 patch config:
+ scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
+ 18 synchronize enable
+ 19 endian: 1 big, 0 little
+ 21:20 dither mode
+ 23 single step enable
+ 24 patch status: 0 invalid, 1 valid
+ 25 context_surface 0: 1 valid
+ 26 context surface 1: 1 valid
+ 27 context pattern: 1 valid
+ 28 context rop: 1 valid
+ 29,30 context beta, beta4
+ entry[1]
+ 7:0 mono format
+ 15:8 color format
+ 31:16 notify instance address
+ entry[2]
+ 15:0 dma 0 instance address
+ 31:16 dma 1 instance address
+ entry[3]
+ dma method traps
+
+ NV40:
+ No idea what the exact format is. Here's what can be deduced:
+
+ entry[0]:
+ 11:0 class (maybe uses more bits here?)
+ 17 user clip enable
+ 21:19 patch config
+ 25 patch status valid ?
+ entry[1]:
+ 15:0 DMA notifier (maybe 20:0)
+ entry[2]:
+ 15:0 DMA 0 instance (maybe 20:0)
+ 24 big endian
+ entry[3]:
+ 15:0 DMA 1 instance (maybe 20:0)
+ entry[4]:
+ entry[5]:
+ set to 0?
+*/
+static struct nouveau_object *nouveau_context_object_create(drm_device_t* dev,
+ int class, uint32_t flags,
+ struct nouveau_object *dma0,
+ struct nouveau_object *dma1,
+ struct nouveau_object *dma_notifier)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct nouveau_object *obj;
+ uint32_t d0, d1, dn;
+ uint32_t flags0,flags1,flags2;
+ flags0=0;flags1=0;flags2=0;
+
+ if (dev_priv->card_type >= NV_40) {
+ if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND)
+ flags0 |= 0x02080000;
+ else if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_SRCCOPY)
+ flags0 |= 0x02080000;
+ if (flags & NV_DMA_CONTEXT_FLAGS_CLIP_ENABLE)
+ flags0 |= 0x00020000;
+#ifdef __BIG_ENDIAN
+ if (flags & NV_DMA_CONTEXT_FLAGS_MONO)
+ flags1 |= 0x01000000;
+ flags2 |= 0x01000000;
+#else
+ if (flags & NV_DMA_CONTEXT_FLAGS_MONO)
+ flags1 |= 0x02000000;
+#endif
+ } else {
+ if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND)
+ flags0 |= 0x01008000;
+ else if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_SRCCOPY)
+ flags0 |= 0x01018000;
+ if (flags & NV_DMA_CONTEXT_FLAGS_CLIP_ENABLE)
+ flags0 |= 0x00002000;
+#ifdef __BIG_ENDIAN
+ flags0 |= 0x00080000;
+ if (flags & NV_DMA_CONTEXT_FLAGS_MONO)
+ flags1 |= 0x00000001;
+#else
+ if (flags & NV_DMA_CONTEXT_FLAGS_MONO)
+ flags1 |= 0x00000002;
+#endif
+ }
+
+ DRM_DEBUG("class=%x, dma0=%08x, dma1=%08x, dman=%08x\n",
+ class,
+ dma0 ? dma0->handle : 0,
+ dma1 ? dma1->handle : 0,
+ dma_notifier ? dma_notifier->handle : 0);
+
+ obj = nouveau_instance_alloc(dev);
+ if (!obj) {
+ DRM_ERROR("couldn't allocate context object\n");
+ return obj;
+ }
+
+ obj->engine = 1;
+ obj->class = class;
+
+ d0 = dma0 ? nouveau_chip_instance_get(dev, dma0->instance) : 0;
+ d1 = dma1 ? nouveau_chip_instance_get(dev, dma1->instance) : 0;
+ dn = dma_notifier ?
+ nouveau_chip_instance_get(dev, dma_notifier->instance) : 0;
+
+ if (dev_priv->card_type >= NV_40) {
+ INSTANCE_WR(obj->instance, 0, class | flags0);
+ INSTANCE_WR(obj->instance, 1, dn | flags1);
+ INSTANCE_WR(obj->instance, 2, d0 | flags2);
+ INSTANCE_WR(obj->instance, 3, d1);
+ INSTANCE_WR(obj->instance, 4, 0x00000000);
+ INSTANCE_WR(obj->instance, 5, 0x00000000);
+ INSTANCE_WR(obj->instance, 6, 0x00000000);
+ INSTANCE_WR(obj->instance, 7, 0x00000000);
+ } else {
+ INSTANCE_WR(obj->instance, 0, class | flags0);
+ INSTANCE_WR(obj->instance, 1, (dn << 16) | flags1);
+ INSTANCE_WR(obj->instance, 2, d0 | (d1 << 16));
+ INSTANCE_WR(obj->instance, 3, 0);
+ }
+
+ return obj;
+}
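+
+/* Worked example (illustrative values): on a pre-NV40 card, creating a
+ * hypothetical class 0x42 object with NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND set
+ * (little-endian build, no MONO flag) gives flags0 = 0x01008000 and
+ * flags1 = flags2 = 0. With dma0 at chip instance 0x1160 and no dma1 or
+ * notifier, the instance words become
+ *     entry[0] = 0x01008042, entry[1] = 0x00000000,
+ *     entry[2] = 0x00001160, entry[3] = 0x00000000.
+ */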
+
+static void
+nouveau_object_free(drm_device_t *dev, int fifo_num, struct nouveau_object *obj)
+{
+ nouveau_object_unlink(dev, fifo_num, obj);
+
+ nouveau_object_instance_free(dev, obj);
+ nouveau_hash_table_remove(dev, obj);
+
+ drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
+ return;
+}
+
+void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ int fifo;
+
+ fifo = nouveau_fifo_id_get(dev, filp);
+ if (fifo == -1)
+ return;
+
+ while (dev_priv->fifos[fifo].objs)
+ nouveau_object_free(dev, fifo, dev_priv->fifos[fifo].objs);
+}
+
+int nouveau_ioctl_object_init(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_nouveau_object_init_t init;
+ struct nouveau_object *obj, *dma0, *dma1, *dman;
+ int fifo;
+
+ fifo = nouveau_fifo_id_get(dev, filp);
+ if (fifo == -1)
+ return DRM_ERR(EINVAL);
+
+ DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_object_init_t __user *)
+ data, sizeof(init));
+
+ //FIXME: check args, only allow trusted objects to be created
+
+ if (nouveau_object_handle_find(dev, fifo, init.handle)) {
+ DRM_ERROR("Channel %d: handle 0x%08x already exists\n",
+ fifo, init.handle);
+ return DRM_ERR(EINVAL);
+ }
+
+ dma0 = nouveau_object_handle_find(dev, fifo, init.dma0);
+ if (init.dma0 && !dma0) {
+ DRM_ERROR("context dma0 - invalid handle 0x%08x\n", init.dma0);
+ return DRM_ERR(EINVAL);
+ }
+ dma1 = nouveau_object_handle_find(dev, fifo, init.dma1);
+ if (init.dma1 && !dma1) {
+ DRM_ERROR("context dma1 - invalid handle 0x%08x\n", init.dma1);
+ return DRM_ERR(EINVAL);
+ }
+ dman = nouveau_object_handle_find(dev, fifo, init.dma_notifier);
+ if (init.dma_notifier && !dman) {
+ DRM_ERROR("context dman - invalid handle 0x%08x\n",
+ init.dma_notifier);
+ return DRM_ERR(EINVAL);
+ }
+
+ obj = nouveau_context_object_create(dev, init.class, init.flags,
+ dma0, dma1, dman);
+ if (!obj)
+ return DRM_ERR(ENOMEM);
+
+ obj->handle = init.handle;
+
+ if (nouveau_hash_table_insert(dev, fifo, obj)) {
+ nouveau_object_free(dev, fifo, obj);
+ return DRM_ERR(ENOMEM);
+ }
+
+ nouveau_object_link(dev, fifo, obj);
+
+ return 0;
+}
+
+int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_nouveau_dma_object_init_t init;
+ struct nouveau_object *obj;
+ int fifo;
+
+ fifo = nouveau_fifo_id_get(dev, filp);
+ if (fifo == -1)
+ return DRM_ERR(EINVAL);
+
+ DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_dma_object_init_t __user *)
+ data, sizeof(init));
+
+ if (nouveau_object_handle_find(dev, fifo, init.handle)) {
+ DRM_ERROR("Channel %d: handle 0x%08x already exists\n",
+ fifo, init.handle);
+ return DRM_ERR(EINVAL);
+ }
+
+ obj = nouveau_dma_object_create(dev, init.offset, init.size,
+ init.access, init.target);
+ if (!obj)
+ return DRM_ERR(ENOMEM);
+
+ obj->handle = init.handle;
+ if (nouveau_hash_table_insert(dev, fifo, obj)) {
+ nouveau_object_free(dev, fifo, obj);
+ return DRM_ERR(ENOMEM);
+ }
+
+ nouveau_object_link(dev, fifo, obj);
+
+ return 0;
+}
+
diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h
new file mode 100644
index 00000000..e56630f0
--- /dev/null
+++ b/shared-core/nouveau_reg.h
@@ -0,0 +1,232 @@
+
+
+#define NV03_BOOT_0 0x00100000
+# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
+# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
+# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
+# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
+# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
+# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
+# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
+# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
+# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
+
+#define NV04_FIFO_DATA 0x0010020c
+# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
+# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
+
+#define NV03_PGRAPH_STATUS 0x004006b0
+#define NV04_PGRAPH_STATUS 0x00400700
+
+#define NV_RAMIN 0x00700000
+
+#define NV_RAMHT_HANDLE_OFFSET 0
+#define NV_RAMHT_CONTEXT_OFFSET 4
+# define NV_RAMHT_CONTEXT_VALID (1<<31)
+# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24
+# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16
+# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0
+# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1
+# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0
+# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23
+# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
+# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
+
+#define NV_DMA_ACCESS_RW 0
+#define NV_DMA_ACCESS_RO 1
+#define NV_DMA_ACCESS_WO 2
+#define NV_DMA_TARGET_VIDMEM 0
+#define NV_DMA_TARGET_AGP 3
+
+#define NV03_FIFO_SIZE 0x8000UL
+#define NV_MAX_FIFO_NUMBER 32
+#define NV03_FIFO_REGS_SIZE 0x10000
+#define NV03_FIFO_REGS(i) (0x00800000+i*NV03_FIFO_REGS_SIZE)
+# define NV03_FIFO_REGS_DMAPUT(i) (NV03_FIFO_REGS(i)+0x40)
+# define NV03_FIFO_REGS_DMAGET(i) (NV03_FIFO_REGS(i)+0x44)
+
+#define NV_PMC_INTSTAT 0x00000100
+# define NV_PMC_INTSTAT_PFIFO_PENDING (1<< 8)
+# define NV_PMC_INTSTAT_PGRAPH_PENDING (1<<12)
+# define NV_PMC_INTSTAT_CRTC0_PENDING (1<<24)
+# define NV_PMC_INTSTAT_CRTC1_PENDING (1<<25)
+# define NV_PMC_INTSTAT_CRTCn_PENDING (3<<24)
+#define NV_PMC_INTEN 0x00000140
+# define NV_PMC_INTEN_MASTER_ENABLE (1<< 0)
+
+#define NV_PGRAPH_INTSTAT 0x00400100
+#define NV04_PGRAPH_INTEN 0x00400140
+#define NV40_PGRAPH_INTEN 0x0040013C
+# define NV_PGRAPH_INTR_NOTIFY (1<< 0)
+# define NV_PGRAPH_INTR_MISSING_HW (1<< 4)
+# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
+# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
+# define NV_PGRAPH_INTR_ERROR (1<<20)
+#define NV_PGRAPH_CTX_CONTROL 0x00400144
+#define NV_PGRAPH_NV40_UNK220 0x00400220
+# define NV_PGRAPH_NV40_UNK220_FB_INSTANCE
+#define NV_PGRAPH_CTX_USER 0x00400148
+#define NV_PGRAPH_CTX_SWITCH1 0x0040014C
+#define NV_PGRAPH_FIFO 0x00400720
+#define NV_PGRAPH_FFINTFC_ST2 0x00400764
+
+/* It's a guess that this works on NV03. Confirmed on NV04, though */
+#define NV_PFIFO_DELAY_0 0x00002040
+#define NV_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV_PFIFO_INTSTAT 0x00002100
+#define NV_PFIFO_INTEN 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<< 0)
+# define NV_PFIFO_INTR_RUNOUT (1<< 4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<< 8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV_PFIFO_RAMHT 0x00002210
+#define NV_PFIFO_RAMFC 0x00002214
+#define NV_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV_PFIFO_CACHES 0x00002500
+#define NV_PFIFO_MODE 0x00002504
+#define NV_PFIFO_DMA 0x00002508
+#define NV_PFIFO_SIZE 0x0000250c
+#define NV_PFIFO_CACH0_PSH0 0x00003000
+#define NV_PFIFO_CACH0_PUL0 0x00003050
+#define NV_PFIFO_CACH0_PUL1 0x00003054
+#define NV_PFIFO_CACH1_PSH0 0x00003200
+#define NV_PFIFO_CACH1_PSH1 0x00003204
+#define NV_PFIFO_CACH1_DMAPSH 0x00003220
+#define NV_PFIFO_CACH1_DMAF 0x00003224
+# define NV_PFIFO_CACH1_DMAF_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACH1_DMAF_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACH1_DMAF_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACH1_DMAF_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACH1_DMAF_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACH1_DMAF_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACH1_DMAF_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACH1_DMAF_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACH1_DMAF_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACH1_DMAF_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACH1_DMAF_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACH1_DMAF_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACH1_DMAF_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACH1_DMAF_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACH1_DMAF_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACH1_DMAF_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACH1_DMAF_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACH1_DMAF_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACH1_DMAF_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACH1_DMAF_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACH1_DMAF_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACH1_DMAF_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACH1_DMAF_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACH1_DMAF_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACH1_DMAF_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACH1_DMAF_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACH1_DMAF_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACH1_DMAF_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACH1_DMAF_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACH1_DMAF_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACH1_DMAF_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACH1_DMAF_SIZE 0x0000E000
+# define NV_PFIFO_CACH1_DMAF_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACH1_DMAF_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACH1_DMAF_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACH1_DMAF_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACH1_DMAF_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACH1_DMAF_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACH1_DMAF_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACH1_DMAF_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACH1_ENDIAN 0x80000000
+# define NV_PFIFO_CACH1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACH1_BIG_ENDIAN 0x80000000
+#define NV_PFIFO_CACH1_DMAS 0x00003228
+#define NV_PFIFO_CACH1_DMAI 0x0000322c
+#define NV_PFIFO_CACH1_DMAC 0x00003230
+#define NV_PFIFO_CACH1_DMAP 0x00003240
+#define NV_PFIFO_CACH1_DMAG 0x00003244
+#define NV_PFIFO_CACH1_REF_CNT 0x00003248
+#define NV_PFIFO_CACH1_DMASR 0x0000324C
+#define NV_PFIFO_CACH1_PUL0 0x00003250
+#define NV_PFIFO_CACH1_PUL1 0x00003254
+#define NV_PFIFO_CACH1_HASH 0x00003258
+#define NV_PFIFO_CACH1_ACQUIRE_TIMEOUT 0x00003260
+#define NV_PFIFO_CACH1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV_PFIFO_CACH1_ACQUIRE_VALUE 0x00003268
+#define NV_PFIFO_CACH1_SEMAPHORE 0x0000326C
+#define NV_PFIFO_CACH1_GET 0x00003270
+#define NV_PFIFO_CACH1_ENG 0x00003280
+#define NV_PFIFO_CACH1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV_PFIFO_CACH1_METHOD(i) (0x00003800+(i*8))
+#define NV_PFIFO_CACH1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACH1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACH1_DATA(i) (0x00090004+(i*8))
+
+#define NV_CRTC0_INTSTAT 0x00600100
+#define NV_CRTC0_INTEN 0x00600140
+#define NV_CRTC1_INTSTAT 0x00602100
+#define NV_CRTC1_INTEN 0x00602140
+# define NV_CRTC_INTR_VBLANK (1<<0)
+
+/* FIFO commands. These are not registers, nor masks */
+#define NV03_FIFO_CMD_JUMP 0x20000000
+#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc
+#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
+
+/* RAMFC offsets */
+#define NV10_RAMFC_DMA_PUT 0x00
+#define NV10_RAMFC_DMA_GET 0x04
+#define NV10_RAMFC_REF_CNT 0x08
+#define NV10_RAMFC_DMA_INSTANCE 0x0C
+#define NV10_RAMFC_DMA_STATE 0x10
+#define NV10_RAMFC_DMA_FETCH 0x14
+#define NV10_RAMFC_ENGINE 0x18
+#define NV10_RAMFC_PULL1_ENGINE 0x1C
+#define NV10_RAMFC_ACQUIRE_VALUE 0x20
+#define NV10_RAMFC_ACQUIRE_TIMESTAMP 0x24
+#define NV10_RAMFC_ACQUIRE_TIMEOUT 0x28
+#define NV10_RAMFC_SEMAPHORE 0x2C
+#define NV10_RAMFC_DMA_SUBROUTINE 0x30
+
+#define NV40_RAMFC_DMA_PUT 0x00
+#define NV40_RAMFC_DMA_GET 0x04
+#define NV40_RAMFC_REF_CNT 0x08
+#define NV40_RAMFC_DMA_INSTANCE 0x0C
+#define NV40_RAMFC_DMA_DCOUNT /* ? */ 0x10
+#define NV40_RAMFC_DMA_STATE 0x14
+#define NV40_RAMFC_DMA_FETCH 0x18
+#define NV40_RAMFC_ENGINE 0x1C
+#define NV40_RAMFC_PULL1_ENGINE 0x20
+#define NV40_RAMFC_ACQUIRE_VALUE 0x24
+#define NV40_RAMFC_ACQUIRE_TIMESTAMP 0x28
+#define NV40_RAMFC_ACQUIRE_TIMEOUT 0x2C
+#define NV40_RAMFC_SEMAPHORE 0x30
+#define NV40_RAMFC_DMA_SUBROUTINE 0x34
+#define NV40_RAMFC_GRCTX_INSTANCE /* guess */ 0x38
+#define NV40_RAMFC_DMA_TIMESLICE 0x3C
+#define NV40_RAMFC_UNK_40 0x40
+#define NV40_RAMFC_UNK_44 0x44
+#define NV40_RAMFC_UNK_48 0x48
+#define NV40_RAMFC_2088 0x4C
+#define NV40_RAMFC_3300 0x50
+
diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c
new file mode 100644
index 00000000..914d1453
--- /dev/null
+++ b/shared-core/nouveau_state.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2005 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "nouveau_drv.h"
+
+/* called when a client exits; release everything that was allocated for its filp */
+void nouveau_preclose(drm_device_t * dev, DRMFILE filp)
+{
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ nouveau_mem_release(filp,dev_priv->fb_heap);
+ nouveau_mem_release(filp,dev_priv->agp_heap);
+ nouveau_object_cleanup(dev, filp);
+ nouveau_fifo_cleanup(dev, filp);
+}
+
+/* first module load, setup the mmio/fb mapping */
+int nouveau_firstopen(struct drm_device *dev)
+{
+ int ret;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+
+ /* resource 0 is mmio regs */
+ /* resource 1 is linear FB */
+ /* resource 2 is ??? (mmio regs + 0x1000000) */
+ /* resource 6 is bios */
+
+ /* map the mmio regs */
+ ret = drm_addmap(dev, drm_get_resource_start(dev, 0), drm_get_resource_len(dev, 0),
+ _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
+ if (dev_priv->mmio)
+ {
+ DRM_INFO("regs mapped ok at 0x%lx\n",dev_priv->mmio->offset);
+ }
+ else
+ {
+ DRM_ERROR("Unable to initialize the mmio mapping. Please report your setup to " DRIVER_EMAIL "\n");
+ return 1;
+ }
+
+ DRM_INFO("%lld MB of video RAM detected\n",nouveau_mem_fb_amount(dev)>>20);
+
+ /* Clear RAMIN
+ * Determine locations for RAMHT/FC/RO
+ * Initialise PFIFO
+ */
+ ret = nouveau_fifo_init(dev);
+ if (ret) return ret;
+
+ /* FIXME: doesn't belong here, and I have no idea what it's for... */
+ if (dev_priv->card_type >= NV_40) {
+ uint32_t pg0220_inst;
+
+ dev_priv->fb_obj = nouveau_dma_object_create(dev,
+ 0, nouveau_mem_fb_amount(dev),
+ NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM);
+
+ pg0220_inst = nouveau_chip_instance_get(dev,
+ dev_priv->fb_obj->instance);
+ NV_WRITE(NV_PGRAPH_NV40_UNK220, pg0220_inst);
+ }
+
+ return 0;
+}
+
+int nouveau_load(struct drm_device *dev, unsigned long flags)
+{
+ drm_nouveau_private_t *dev_priv;
+
+ if (flags==NV_UNKNOWN)
+ return DRM_ERR(EINVAL);
+
+ dev_priv = drm_alloc(sizeof(drm_nouveau_private_t), DRM_MEM_DRIVER);
+ if (!dev_priv)
+ return DRM_ERR(ENOMEM);
+
+ memset(dev_priv, 0, sizeof(drm_nouveau_private_t));
+ dev_priv->card_type=flags&NOUVEAU_FAMILY;
+ dev_priv->flags=flags&NOUVEAU_FLAGS;
+
+ dev->dev_private = (void *)dev_priv;
+
+ return 0;
+}
+
+int nouveau_unload(struct drm_device *dev)
+{
+ drm_free(dev->dev_private, sizeof(drm_nouveau_private_t), DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int nouveau_ioctl_getparam(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ drm_nouveau_getparam_t getparam;
+
+ DRM_COPY_FROM_USER_IOCTL(getparam, (drm_nouveau_getparam_t __user *)data,
+ sizeof(getparam));
+
+ switch (getparam.param) {
+ case NOUVEAU_GETPARAM_PCI_VENDOR:
+ getparam.value=dev->pci_vendor;
+ break;
+ case NOUVEAU_GETPARAM_PCI_DEVICE:
+ getparam.value=dev->pci_device;
+ break;
+ case NOUVEAU_GETPARAM_BUS_TYPE:
+ if (drm_device_is_agp(dev))
+ getparam.value=NV_AGP;
+ else if (drm_device_is_pcie(dev))
+ getparam.value=NV_PCIE;
+ else
+ getparam.value=NV_PCI;
+ break;
+ case NOUVEAU_GETPARAM_FB_PHYSICAL:
+ getparam.value=dev_priv->fb_phys;
+ break;
+ case NOUVEAU_GETPARAM_AGP_PHYSICAL:
+ getparam.value=dev_priv->agp_phys;
+ break;
+ default:
+ DRM_ERROR("unknown parameter %d\n", getparam.param);
+ return DRM_ERR(EINVAL);
+ }
+
+ DRM_COPY_TO_USER_IOCTL((drm_nouveau_getparam_t __user *)data, getparam,
+ sizeof(getparam));
+ return 0;
+}
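+
+/* Illustrative userspace sketch (assumes libdrm's drmCommandWriteRead() and
+ * the DRM_NOUVEAU_GETPARAM command index from nouveau_drm.h):
+ *
+ *     drm_nouveau_getparam_t gp = { .param = NOUVEAU_GETPARAM_BUS_TYPE };
+ *     if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp)) == 0)
+ *         printf("bus type: %lld\n", (long long)gp.value);
+ */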
+
+int nouveau_ioctl_setparam(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ drm_nouveau_private_t *dev_priv = dev->dev_private;
+ drm_nouveau_setparam_t setparam;
+
+ DRM_COPY_FROM_USER_IOCTL(setparam, (drm_nouveau_setparam_t __user *)data,
+ sizeof(setparam));
+
+ switch (setparam.param) {
+ case NOUVEAU_SETPARAM_CMDBUF_LOCATION:
+ switch (setparam.value) {
+ case NOUVEAU_MEM_AGP:
+ case NOUVEAU_MEM_FB:
+ break;
+ default:
+ DRM_ERROR("invalid CMDBUF_LOCATION value=%d\n", setparam.value);
+ return DRM_ERR(EINVAL);
+ }
+ dev_priv->config.cmdbuf.location = setparam.value;
+ break;
+ case NOUVEAU_SETPARAM_CMDBUF_SIZE:
+ dev_priv->config.cmdbuf.size = setparam.value;
+ break;
+ default:
+ DRM_ERROR("unknown parameter %d\n", setparam.param);
+ return DRM_ERR(EINVAL);
+ }
+
+ return 0;
+}
+
+/* busy-waits until the graphics engine reports idle */
+void nouveau_wait_for_idle(struct drm_device *dev)
+{
+ drm_nouveau_private_t *dev_priv=dev->dev_private;
+ switch(dev_priv->card_type)
+ {
+ case NV_03:
+ while(NV_READ(NV03_PGRAPH_STATUS));
+ break;
+ default:
+ while(NV_READ(NV04_PGRAPH_STATUS));
+ break;
+ }
+}
+